From 86dc7cc804d47d33aca7daf8ddf4107d6eaa5f3c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:34:25 -0500 Subject: [PATCH 001/777] Kafka init Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../assigned_hostgroups.local.map.yaml | 1 + pillar/kafka/nodes.sls | 30 +++++ pillar/logstash/nodes.sls | 2 +- pillar/top.sls | 10 ++ salt/allowed_states.map.jinja | 11 +- salt/docker/defaults.yaml | 8 ++ salt/docker/soc_docker.yaml | 1 + salt/firewall/containers.map.jinja | 5 + salt/firewall/defaults.yaml | 94 +++++++++++++ salt/firewall/soc_firewall.yaml | 64 +++++++++ salt/kafka/config.sls | 101 ++++++++++++++ salt/kafka/enabled.sls | 46 +++++++ salt/kafka/etc/server.properties.jinja | 123 ++++++++++++++++++ salt/kafka/init.sls | 9 ++ salt/kafka/sostatus.sls | 21 +++ salt/kafka/storage.sls | 31 +++++ .../sbin_jinja/so-kafka-generate-keystore | 16 +++ salt/logstash/defaults.yaml | 4 + salt/logstash/enabled.sls | 5 +- .../config/so/0800_input_kafka.conf.jinja | 26 ++++ .../config/so/0899_output_kafka.conf.jinja | 22 ++++ salt/logstash/soc_logstash.yaml | 2 + salt/manager/tools/sbin/so-firewall-minion | 3 + salt/manager/tools/sbin/so-kafka-clusterid | 22 ++++ salt/manager/tools/sbin/so-minion | 4 + salt/ssl/init.sls | 122 +++++++++++++++++ salt/ssl/remove.sls | 17 +++ salt/top.sls | 10 ++ salt/vars/kafkanode.map.jinja | 1 + setup/so-functions | 8 +- setup/so-setup | 10 ++ setup/so-whiptail | 5 +- 32 files changed, 828 insertions(+), 6 deletions(-) create mode 100644 pillar/kafka/nodes.sls create mode 100644 salt/kafka/config.sls create mode 100644 salt/kafka/enabled.sls create mode 100644 salt/kafka/etc/server.properties.jinja create mode 100644 salt/kafka/init.sls create mode 100644 salt/kafka/sostatus.sls create mode 100644 salt/kafka/storage.sls create mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore create mode 100644 
salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja create mode 100644 salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja create mode 100644 salt/manager/tools/sbin/so-kafka-clusterid create mode 100644 salt/vars/kafkanode.map.jinja diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index 07f389af0..c6eb199c3 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -21,3 +21,4 @@ role: standalone: searchnode: sensor: + kafkanode: \ No newline at end of file diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls new file mode 100644 index 000000000..a7d97ac9c --- /dev/null +++ b/pillar/kafka/nodes.sls @@ -0,0 +1,30 @@ +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode', fun='network.ip_addrs', tgt_type='compound') %} +{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} +ated +{% set existing_ids = [] %} +{% for node in pillar_kafkanodes.values() %} + {% if node.get('nodeid') %} + {% do existing_ids.append(node['nodeid']) %} + {% endif %} +{% endfor %} +{% set all_possible_ids = range(1, 256)|list %} + +{% set available_ids = [] %} +{% for id in all_possible_ids %} + {% if id not in existing_ids %} + {% do available_ids.append(id) %} + {% endif %} +{% endfor %} + +{% set final_nodes = pillar_kafkanodes.copy() %} + +{% for minionid, ip in current_kafkanodes.items() %} + {% set hostname = minionid.split('_')[0] %} + {% if hostname not in final_nodes %} + {% set new_id = available_ids.pop(0) %} + {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} + {% endif %} +{% endfor %} + +kafka: + nodes: {{ final_nodes|tojson }} \ No newline at end of file diff --git a/pillar/logstash/nodes.sls b/pillar/logstash/nodes.sls index 8d3bdab65..3b75a5cae 100644 --- a/pillar/logstash/nodes.sls +++ b/pillar/logstash/nodes.sls @@ -2,7 +2,7 @@ {% set
cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %} {% for minionid, ip in salt.saltutil.runner( 'mine.get', - tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ', + tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet or G@role:so-kafkanode ', fun='network.ip_addrs', tgt_type='compound') | dictsort() %} diff --git a/pillar/top.sls b/pillar/top.sls index 4893c44f9..49e493ec8 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -65,6 +65,7 @@ base: - soctopus.adv_soctopus - minions.{{ grains.id }} - minions.adv_{{ grains.id }} + - kafka.nodes '*_sensor': - healthcheck.sensor @@ -241,6 +242,15 @@ base: - minions.{{ grains.id }} - minions.adv_{{ grains.id }} + '*_kafkanode': + - logstash.nodes + - logstash.soc_logstash + - logstash.adv_logstash + - minions.{{ grains.id }} + - minions.adv_{{ grains.id }} + - secrets + - kafka.nodes + '*_import': - secrets - elasticsearch.index_templates diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index a3c5c75ab..11dfde824 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -187,6 +187,15 @@ 'schedule', 'docker_clean' ], + 'so-kafkanode': [ + 'kafka', + 'logstash', + 'ssl', + 'telegraf', + 'firewall', + 'schedule', + 'docker_clean' + ], 'so-desktop': [ ], }, grain='role') %} @@ -203,7 +212,7 @@ {% do allowed_states.append('strelka') %} {% endif %} - {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %} + {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %} {% do allowed_states.append('elasticsearch') %} {% endif %} diff --git a/salt/docker/defaults.yaml 
b/salt/docker/defaults.yaml index e39feaf06..3155841c9 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -201,3 +201,11 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] + 'so-kafka': + final_octet: 88 + port_bindings: + - 0.0.0.0:9092:9092 + - 0.0.0.0:2181:2181 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] \ No newline at end of file diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index d227a3e85..87751010e 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -68,3 +68,4 @@ docker: so-steno: *dockerOptions so-suricata: *dockerOptions so-zeek: *dockerOptions + so-kafka: *dockerOptions \ No newline at end of file diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 617b4a216..b19f66355 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -87,6 +87,11 @@ 'so-logstash', 'so-redis', ] %} +{% elif GLOBALS.role == 'so-kafkanode' %} +{% set NODE_CONTAINERS = [ + 'so-logstash', + 'so-kafka', +] %} {% elif GLOBALS.role == 'so-idh' %} {% set NODE_CONTAINERS = [ diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index ff127c419..112e0eaaa 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -19,6 +19,7 @@ firewall: manager: [] managersearch: [] receiver: [] + kafkanode: [] searchnode: [] self: [] sensor: [] @@ -90,6 +91,11 @@ firewall: tcp: - 8086 udp: [] + kafka: + tcp: + - 9092 + - 9093 + udp: [] kibana: tcp: - 5601 @@ -441,6 +447,15 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + kafkanode: + portgroups: + - yum + - docker_registry + - influxdb + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update + - sensoroni analyst: portgroups: - nginx @@ -513,6 +528,9 @@ firewall: receiver: portgroups: - salt_manager + kafkanode: + portgroups: + - salt_manager desktop: portgroups: - salt_manager @@ -629,6 +647,15 @@ firewall: 
- elastic_agent_data - elastic_agent_update - sensoroni + kafkanode: + portgroups: + - yum + - docker_registry + - influxdb + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update + - sensoroni analyst: portgroups: - nginx @@ -1339,6 +1366,73 @@ firewall: portgroups: [] customhostgroup9: portgroups: [] + kafkanode: + chain: + DOCKER-USER: + hostgroups: + searchnode: + portgroups: + - kafka + kafkanode: + portgroups: + - kafka + customhostgroup0: + portgroups: [] + customhostgroup1: + portgroups: [] + customhostgroup2: + portgroups: [] + customhostgroup3: + portgroups: [] + customhostgroup4: + portgroups: [] + customhostgroup5: + portgroups: [] + customhostgroup6: + portgroups: [] + customhostgroup7: + portgroups: [] + customhostgroup8: + portgroups: [] + customhostgroup9: + portgroups: [] + INPUT: + hostgroups: + anywhere: + portgroups: + - ssh + dockernet: + portgroups: + - all + localhost: + portgroups: + - all + self: + portgroups: + - syslog + syslog: + portgroups: + - syslog + customhostgroup0: + portgroups: [] + customhostgroup1: + portgroups: [] + customhostgroup2: + portgroups: [] + customhostgroup3: + portgroups: [] + customhostgroup4: + portgroups: [] + customhostgroup5: + portgroups: [] + customhostgroup6: + portgroups: [] + customhostgroup7: + portgroups: [] + customhostgroup8: + portgroups: [] + customhostgroup9: + portgroups: [] idh: chain: DOCKER-USER: diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 209484b6e..7d250737a 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -34,6 +34,7 @@ firewall: heavynode: *hostgroupsettings idh: *hostgroupsettings import: *hostgroupsettings + kafkanode: *hostgroupsettings localhost: *ROhostgroupsettingsadv manager: *hostgroupsettings managersearch: *hostgroupsettings @@ -115,6 +116,9 @@ firewall: influxdb: tcp: *tcpsettings udp: *udpsettings + kafka: + tcp: *tcpsettings + udp: *udpsettings kibana: tcp: *tcpsettings udp: 
*udpsettings @@ -363,6 +367,8 @@ firewall: portgroups: *portgroupsdocker endgame: portgroups: *portgroupsdocker + kafkanode: + portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -454,6 +460,8 @@ firewall: portgroups: *portgroupsdocker syslog: portgroups: *portgroupsdocker + kafkanode: + portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -938,6 +946,62 @@ firewall: portgroups: *portgroupshost customhostgroup9: portgroups: *portgroupshost + kafkanode: + chain: + DOCKER-USER: + hostgroups: + searchnode: + portgroups: *portgroupsdocker + kafkanode: + portgroups: *portgroupsdocker + customhostgroup0: + portgroups: *portgroupsdocker + customhostgroup1: + portgroups: *portgroupsdocker + customhostgroup2: + portgroups: *portgroupsdocker + customhostgroup3: + portgroups: *portgroupsdocker + customhostgroup4: + portgroups: *portgroupsdocker + customhostgroup5: + portgroups: *portgroupsdocker + customhostgroup6: + portgroups: *portgroupsdocker + customhostgroup7: + portgroups: *portgroupsdocker + customhostgroup8: + portgroups: *portgroupsdocker + customhostgroup9: + portgroups: *portgroupsdocker + INPUT: + hostgroups: + anywhere: + portgroups: *portgroupshost + dockernet: + portgroups: *portgroupshost + localhost: + portgroups: *portgroupshost + customhostgroup0: + portgroups: *portgroupshost + customhostgroup1: + portgroups: *portgroupshost + customhostgroup2: + portgroups: *portgroupshost + customhostgroup3: + portgroups: *portgroupshost + customhostgroup4: + portgroups: *portgroupshost + customhostgroup5: + portgroups: *portgroupshost + customhostgroup6: + portgroups: *portgroupshost + customhostgroup7: + portgroups: *portgroupshost + customhostgroup8: + portgroups: *portgroupshost + customhostgroup9: + portgroups: *portgroupshost idh: chain: diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls new file mode 100644 index 000000000..8caaa01cd --- /dev/null +++ b/salt/kafka/config.sls @@ -0,0 +1,101 @@ +# 
Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set kafka_ips_logstash = [] %} +{% set kafka_ips_kraft = [] %} +{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} +{% set kafka_ip = GLOBALS.node_ip %} + +{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set combined = [] %} +{% for hostname, data in nodes.items() %} + {% do combined.append(data.nodeid ~ "@" ~ hostname) %} +{% endfor %} +{% set kraft_controller_quorum_voters = ','.join(combined) %} + +{# Create list for kafka <-> logstash/searchnode communcations #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} +{% endfor %} +{% set kafka_server_list = "','".join(kafka_ips_logstash) %} + +{# Create a list for kraft controller <-> kraft controller communications. 
Used for Kafka metadata management #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} +{% endfor %} +{% set kraft_server_list = "','".join(kafka_ips_kraft) %} + + +include: + - ssl + +kafka_group: + group.present: + - name: kafka + - gid: 960 + +kafka: + user.present: + - uid: 960 + - gid: 960 + +{# Future tools to query kafka directly / show consumer groups +kafka_sbin_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin + - user: 960 + - group: 960 + - file_mode: 755 #} + +kakfa_log_dir: + file.directory: + - name: /opt/so/log/kafka + - user: 960 + - group: 960 + - makedirs: True + +kafka_data_dir: + file.directory: + - name: /nsm/kafka/data + - user: 960 + - group: 960 + - makedirs: True + +{# When docker container is created an added to registry. Update so-kafka-generate-keystore script #} +kafka_keystore_script: + cmd.script: + - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore + - template: jinja + - cwd: /opt/so + - defaults: + GLOBALS: {{ GLOBALS }} + +kafka_kraft_server_properties: + file.managed: + - source: salt://kafka/etc/server.properties.jinja + - name: /opt/so/conf/kafka/server.properties + - template: jinja + - defaults: + kafka_nodeid: {{ kafka_nodeid }} + kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} + kafka_ip: {{ kafka_ip }} + - user: 960 + - group: 960 + - makedirs: True + - show_changes: False + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls new file mode 100644 index 000000000..1bf7dcf8b --- /dev/null +++ b/salt/kafka/enabled.sls @@ -0,0 +1,46 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements.
Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + +include: + - kafka.sostatus + - kafka.config + - kafka.storage + +so-kafka: + docker_container.running: + - image: so-kafka + - hostname: so-kafka + - name: so-kafka + - networks: + - sobridge: + - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} + - user: kafka + - port_bindings: + {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} + - {{ BINDING }} + {% endfor %} + - binds: + - /etc/pki/kafka.jks:/etc/pki/kafka.jks + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /nsm/kafka/data/:/nsm/kafka/data/:rw + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + +delete_so-kafka_so-status.disabled: + file.uncomment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja new file mode 100644 index 000000000..ad5ac67a9 --- /dev/null +++ b/salt/kafka/etc/server.properties.jinja @@ -0,0 +1,123 @@ +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. 
Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id={{ kafka_nodeid }} + +# The connect string for the controller quorum +controller.quorum.voters={{ kraft_controller_quorum_voters }} + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. +# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), +# with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=BROKER + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +advertised.listeners=BROKER://{{ kafka_ip }}:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol +# This is required if running in KRaft mode. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL + +#SSL configuration +ssl.keystore.location=/etc/pki/kafka.jks +ssl.keystore.password=changeit +ssl.keystore.type=JKS +ssl.truststore.location=/etc/pki/java/sos/cacerts +ssl.truststore.password=changeit + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/nsm/kafka/data + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
+offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 \ No newline at end of file diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls new file mode 100644 index 000000000..653cd4b88 --- /dev/null +++ b/salt/kafka/init.sls @@ -0,0 +1,9 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{# Create map.jinja to enable / disable kafka from UI #} +{# Temporarily just enable kafka #} +include: + - kafka.enabled diff --git a/salt/kafka/sostatus.sls b/salt/kafka/sostatus.sls new file mode 100644 index 000000000..4c7519964 --- /dev/null +++ b/salt/kafka/sostatus.sls @@ -0,0 +1,21 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} + +append_so-kafka_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-kafka + - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls new file mode 100644 index 000000000..dc114ef4f --- /dev/null +++ b/salt/kafka/storage.sls @@ -0,0 +1,31 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} + +{# Initialize kafka storage if it doesn't already exist. 
Just looking for meta.properties in /nsm/kafka/data #} +{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} +{% else %} +kafka_storage_init: + cmd.run: + - name: | + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh so-kafka format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/server.properties +kafka_rm_kafkainit: + cmd.run: + - name: | + docker rm so-kafkainit +{% endif %} + + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore new file mode 100644 index 000000000..69bb6ad87 --- /dev/null +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -0,0 +1,16 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. /usr/sbin/so-common + +if [ ! 
-f /etc/pki/kafka.jks ]; then + docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool so-kafka -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -alias kafkastore -noprompt + docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks + docker rm so-kafka-keystore +else + exit 0 +fi \ No newline at end of file diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index e4c18cc64..b7382090e 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -19,6 +19,8 @@ logstash: - search fleet: - fleet + kafkanode: - kafkanode defined_pipelines: fleet: - so/0012_input_elastic_agent.conf.jinja @@ -37,6 +39,8 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - so/9900_output_endgame.conf.jinja + kafkanode: + - so/0899_output_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index c76f81d21..96e29b25a 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -75,10 +75,13 @@ so-logstash: {% else %} - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% endif %} - {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %} + {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro {% endif %} + {% if GLOBALS.role in ['so-kafkanode'] %} + - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro + {% endif %} {% if GLOBALS.role == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro diff --git
a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja new file mode 100644 index 000000000..c1429319a --- /dev/null +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -0,0 +1,26 @@ +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set broker_ips = [] %} +{% for node, node_data in kafka_brokers.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} + +{% set bootstrap_servers = "','".join(broker_ips) %} + + +#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes +input { + kafka { + codec => json + #Can ingest multiple topics. Set to a value from SOC UI? + topics => ['logstash-topic',] + group_id => 'searchnodes' + security_protocol => 'SSL' + bootstrap_servers => '{{ bootstrap_servers }}' + ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' + ssl_keystore_password => '' + ssl_keystore_type => 'PKCS12' + ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' + # Set password as a pillar to avoid bad optics?
This is default truststore for grid + ssl_truststore_password => 'changeit' + } +} \ No newline at end of file diff --git a/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja new file mode 100644 index 000000000..ff9a6f6ee --- /dev/null +++ b/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja @@ -0,0 +1,22 @@ +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set broker_ips = [] %} +{% for node, node_data in kafka_brokers.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} + +{% set bootstrap_servers = "','".join(broker_ips) %} + +#Run on kafka broker logstash writes to topic 'logstash-topic' +output { + kafka { + codec => json + topic_id => 'logstash-topic' + bootstrap_servers => '{{ bootstrap_servers }}' + security_protocol => 'SSL' + ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' + ssl_keystore_password => '' + ssl_keystore_type => 'PKCS12' + ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' + ssl_truststore_password => 'changeit' + } +} \ No newline at end of file diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index bcb99bad5..144094eb1 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -16,6 +16,7 @@ logstash: manager: *assigned_pipelines managersearch: *assigned_pipelines fleet: *assigned_pipelines + kafkanode: *assigned_pipelines defined_pipelines: receiver: &defined_pipelines description: List of pipeline configurations assign to this group. 
@@ -26,6 +27,7 @@ logstash: fleet: *defined_pipelines manager: *defined_pipelines search: *defined_pipelines + kafkanode: *defined_pipelines custom0: *defined_pipelines custom1: *defined_pipelines custom2: *defined_pipelines diff --git a/salt/manager/tools/sbin/so-firewall-minion b/salt/manager/tools/sbin/so-firewall-minion index 66a0afcea..3357e5185 100755 --- a/salt/manager/tools/sbin/so-firewall-minion +++ b/salt/manager/tools/sbin/so-firewall-minion @@ -79,6 +79,9 @@ fi 'RECEIVER') so-firewall includehost receiver "$IP" --apply ;; + 'KAFKANODE') + so-firewall includehost kafkanode "$IP" --apply + ;; 'DESKTOP') so-firewall includehost desktop "$IP" --apply ;; diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid new file mode 100644 index 000000000..64833a0d2 --- /dev/null +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -0,0 +1,22 @@ +#!/bin/bash + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +local_salt_dir=/opt/so/saltstack/local + +if [[ -f /usr/sbin/so-common ]]; then + source /usr/sbin/so-common +else + source $(dirname $0)/../../../common/tools/sbin/so-common +fi + +if ! 
grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls +else + echo 'kafka_cluster_id exists' + salt-call pillar.get secrets +fi \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index edc0b1404..c61098589 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -556,6 +556,10 @@ function createRECEIVER() { add_telegraf_to_minion } +function createKAFKANODE() { + add_logstash_to_minion + # add_telegraf_to_minion +} function testConnection() { retry 15 3 "salt '$MINION_ID' test.ping" True diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index ef93a9072..2a71cd853 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -664,6 +664,128 @@ elastickeyperms: {%- endif %} +# Roles will need to be modified. Below is just for testing encrypted kafka pipelines +# Remove so-manager. 
Just inplace for testing +{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %} +# Create a cert for Kafka encryption +kafka_key: + x509.private_key_managed: + - name: /etc/pki/kafka.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + - prereq: + - x509: /etc/pki/kafka.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_crt: + x509.certificate_managed: + - name: /etc/pki/kafka.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: elasticfleet + - private_key: /etc/pki/kafka.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka.key + +# Kafka needs a keystore so just creating a new key / cert for that purpose +etc_kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} + - prereq: + - x509: etc_kafka_logstash_crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +etc_kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - signing_policy: elasticfleet + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:" + -
onchanges: + - x509: etc_kafka_logstash_key + +kafka_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_cert_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_keystore_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 960 + - group: 939 + +kafka_keystore_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p12 + - mode: 640 + - user: 960 + - group: 939 + +{% endif %} {% else %} {{sls}}_state_not_allowed: diff --git a/salt/ssl/remove.sls b/salt/ssl/remove.sls index 43a245288..bb4562300 100644 --- a/salt/ssl/remove.sls +++ b/salt/ssl/remove.sls @@ -67,3 +67,20 @@ fleet_crt: fbcertdir: file.absent: - name: /opt/so/conf/filebeat/etc/pki + +kafka_crt: + file.absent: + - name: /etc/pki/kafka.crt +kafka_key: + file.absent: + - name: /etc/pki/kafka.key + +kafka_logstash_crt: + file.absent: + - name: /etc/pki/kafka-logstash.crt +kafka_logstash_key: + file.absent: + - name: /etc/pki/kafka-logstash.key +kafka_logstash_keystore: + file.absent: + - name: /etc/pki/kafka-logstash.p12 diff --git a/salt/top.sls b/salt/top.sls index 2323731a1..cd1b92e5c 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -255,6 +255,16 @@ base: - elasticfleet.install_agent_grid - docker_clean + '*_kafkanode and G@saltversion:{{saltversion}}': + - match: compound + - kafka + - logstash + - ssl + - telegraf + - firewall + - docker_clean + - elasticfleet.install_agent_grid + '*_idh and G@saltversion:{{saltversion}}': - match: compound - ssl diff --git a/salt/vars/kafkanode.map.jinja 
b/salt/vars/kafkanode.map.jinja new file mode 100644 index 000000000..396cefcc9 --- /dev/null +++ b/salt/vars/kafkanode.map.jinja @@ -0,0 +1 @@ +{% set ROLE_GLOBALS = {} %} \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index fc0876248..76887c81c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1242,6 +1242,7 @@ generate_passwords(){ REDISPASS=$(get_random_value) SOCSRVKEY=$(get_random_value 64) IMPORTPASS=$(get_random_value) + KAFKACLUSTERID=$(get_random_value 22) } generate_interface_vars() { @@ -1269,7 +1270,7 @@ get_redirect() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'KAFKANODE') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; esac @@ -1663,6 +1664,8 @@ process_installtype() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true + elif [ "$install_type" = 'KAFKANODE' ]; then + is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then if [ "$setup_type" != 'desktop' ]; then exec bash so-setup desktop @@ -2105,7 +2108,8 @@ secrets_pillar(){ " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\ " playbook_automation_api_key: "\ " import_pass: $IMPORTPASS"\ - " influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls + " influx_pass: $INFLUXPASS"\ + " kafka_cluster_id: $KAFKACLUSTERID" > $local_salt_dir/pillar/secrets.sls fi } diff --git a/setup/so-setup b/setup/so-setup index 14d6b2304..bc64cd9d1 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -574,6 +574,16 @@ if ! 
[[ -f $install_opt_file ]]; then check_manager_connection set_minion_info whiptail_end_settings + + elif [[ $is_kafka ]]; then + info "Setting up as node type Kafka broker" + #check_requirements "kafka" + networking_needful + collect_mngr_hostname + add_mngr_ip_to_hosts + check_manager_connection + set_minion_info + whiptail_end_settings fi if [[ $waitforstate ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index c55e2db8f..4553ebd33 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -640,13 +640,14 @@ whiptail_install_type_dist_existing() { Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users. EOM - install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 6 \ + install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \ "SENSOR" "Create a forward only sensor " \ "SEARCHNODE" "Add a search node with parsing " \ "FLEET" "Dedicated Elastic Fleet Node " \ "HEAVYNODE" "Sensor + Search Node " \ "IDH" "Intrusion Detection Honeypot Node " \ "RECEIVER" "Receiver Node " \ + "KAFKANODE" "Kafka Broker + Kraft controller" \ 3>&1 1>&2 2>&3 # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" \ # TODO # "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO @@ -677,6 +678,8 @@ whiptail_install_type_dist_existing() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true + elif [ "$install_type" = 'KAFKANODE' ]; then + is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then if [ "$setup_type" != 'desktop' ]; then exec bash so-setup desktop From 80a3942245b291a8168815c6f486bba8fc66f586 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 22 Jan 2024 20:15:48 +0000 Subject: [PATCH 002/777] Rename RITA pipelines --- salt/elasticsearch/files/ingest/{rita.beacon => rita.beacons} | 0 .../files/ingest/{rita.connection => rita.connections} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename salt/elasticsearch/files/ingest/{rita.beacon => rita.beacons} (100%) rename 
salt/elasticsearch/files/ingest/{rita.connection => rita.connections} (100%) diff --git a/salt/elasticsearch/files/ingest/rita.beacon b/salt/elasticsearch/files/ingest/rita.beacons similarity index 100% rename from salt/elasticsearch/files/ingest/rita.beacon rename to salt/elasticsearch/files/ingest/rita.beacons diff --git a/salt/elasticsearch/files/ingest/rita.connection b/salt/elasticsearch/files/ingest/rita.connections similarity index 100% rename from salt/elasticsearch/files/ingest/rita.connection rename to salt/elasticsearch/files/ingest/rita.connections From b08db3e05a7a038a69cf34d3511a729959e2c243 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 22 Jan 2024 20:16:43 +0000 Subject: [PATCH 003/777] Add RITA policy --- .../grid-nodes_general/rita-logs.json | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json new file mode 100644 index 000000000..4dc46e8e2 --- /dev/null +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json @@ -0,0 +1,34 @@ +{ + "package": { + "name": "log", + "version": "2.3.0" + }, + "name": "rita-logs", + "namespace": "so", + "description": "RITA Logs", + "policy_id": "so-grid-nodes_general", + "vars": {}, + "inputs": { + "logs-logfile": { + "enabled": true, + "streams": { + "log.logs": { + "enabled": true, + "vars": { + "paths": [ + "/nsm/rita/beacons.csv", + "/nsm/rita/exploded-dns.csv", + "/nsm/rita/long-connections.csv" + ], + "exclude_files": [], + "ignore_older": "72h", + "data_stream.dataset": "rita", + "tags": [], + "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = 
event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita", + "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']" + } + } + } + } + } +} From 5542db0aac7d0c6467b028a284381597bbf8a350 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 22 Jan 2024 21:07:46 +0000 Subject: [PATCH 004/777] Leave package version null --- .../files/integrations/grid-nodes_general/rita-logs.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json index 4dc46e8e2..a97faaa5f 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json @@ -1,7 +1,7 @@ { "package": { "name": "log", - "version": "2.3.0" + "version": "" }, "name": "rita-logs", "namespace": "so", From 350b0df3bfec90728cd1c4059ff60b04caf1987a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 22 Jan 2024 22:48:15 -0500 Subject: [PATCH 005/777] Handle non-zero Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/common/tools/sbin/so-common-status-check | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/salt/common/tools/sbin/so-common-status-check b/salt/common/tools/sbin/so-common-status-check index b073eb457..39e0c16a7 100644 --- a/salt/common/tools/sbin/so-common-status-check +++ b/salt/common/tools/sbin/so-common-status-check @@ -40,15 +40,15 @@ def check_needs_restarted(): def check_for_fips(): fips = 0 try: - result = subprocess.run(['fips-mode-setup', '--is-enabled'], check=True, stdout=subprocess.PIPE) - fips = int(result.returncode == 0) + result = 
subprocess.run(['fips-mode-setup', '--is-enabled'], stdout=subprocess.PIPE) + if result.returncode == 0: + fips = 1 except FileNotFoundError: with open('/proc/sys/crypto/fips_enabled', 'r') as f: contents = f.read() if '1' in contents: fips = 1 - else: - fips = 0 + with open('/opt/so/log/sostatus/fips_enabled', 'w') as f: f.write(str(fips)) @@ -61,8 +61,9 @@ def check_for_luks(): for gc in device['children']: if 'children' in gc: try: - result = subprocess.run(['cryptsetup', 'isLuks', gc['name']], check=True, stdout=subprocess.PIPE) - luks = int(result.returncode == 0) + result = subprocess.run(['cryptsetup', 'isLuks', gc['name']], stdout=subprocess.PIPE) + if result.returncode == 0: + luks = 1 except FileNotFoundError: for ggc in gc['children']: if 'crypt' in ggc['type']: From 3bcb0bc132e32eb16d0261b2948a53c788fe5449 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 23 Jan 2024 17:18:54 +0000 Subject: [PATCH 006/777] Update defaults --- salt/strelka/defaults.yaml | 197 ++++++++++++++++++++++++++++++++++++- 1 file changed, 194 insertions(+), 3 deletions(-) diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index 76110aafe..38c72138a 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -17,9 +17,10 @@ strelka: mime_db: '/usr/lib/file/magic.mgc' yara_rules: '/etc/strelka/taste/' scanners: - 'ScanBase64': + 'ScanBase64PE': - positive: - filename: '^base64_' + flavors: + - 'base64_pe' priority: 5 'ScanBatch': - positive: @@ -27,12 +28,27 @@ strelka: - 'text/x-msdos-batch' - 'batch_file' priority: 5 + 'ScanBmpEof': + - positive: + flavors: + - 'image/x-ms-bmp' + - 'bmp_file' + negative: + source: + - 'ScanTranscode' + priority: 5 'ScanBzip2': - positive: flavors: - 'application/x-bzip2' - 'bzip2_file' priority: 5 + 'ScanDmg': + - positive: + flavors: + - 'dmg_disk_image' + - 'hfsplus_disk_image' + priority: 5 'ScanDocx': - positive: flavors: @@ -40,6 +56,11 @@ strelka: priority: 5 options: extract_text: False + 'ScanDonut': + - 
positive: + flavors: + - 'hacktool_win_shellcode_donut' + priority: 5 'ScanElf': - positive: flavors: @@ -56,6 +77,26 @@ strelka: - 'message/rfc822' - 'email_file' priority: 5 + 'ScanEncryptedDoc': + - positive: + flavors: + - 'encrypted_word_document' + priority: 5 + options: + max_length: 5 + scanner_timeout: 150 + log_pws: True + password_file: "/etc/strelka/passwords.dat" + 'ScanEncryptedZip': + - positive: + flavors: + - 'encrypted_zip' + priority: 5 + options: + max_length: 5 + scanner_timeout: 150 + log_pws: True + password_file: '/etc/strelka/passwords.dat' 'ScanEntropy': - positive: flavors: @@ -111,6 +152,16 @@ strelka: priority: 5 options: tmp_directory: '/dev/shm/' + 'ScanFooter': + - positive: + flavors: + - '*' + priority: 5 + options: + length: 50 + encodings: + - classic + - backslash 'ScanGif': - positive: flavors: @@ -144,13 +195,25 @@ strelka: - 'html_file' priority: 5 options: - parser: "html5lib" + max_hyperlinks: 50 + 'ScanIqy': + - positive: + flavors: + - 'iqy_file' + priority: 5 'ScanIni': - positive: filename: '(\.([Cc][Ff][Gg]|[Ii][Nn][Ii])|PROJECT)$' flavors: - 'ini_file' priority: 5 + 'ScanIso': + - positive: + flavors: + - 'application/x-iso9660-image' + priority: 5 + options: + limit: 50 'ScanJarManifest': - positive: flavors: @@ -198,6 +261,25 @@ strelka: priority: 5 options: limit: 1000 + 'ScanLNK': + - positive: + flavors: + - 'lnk_file' + priority: 5 + 'ScanLsb': + - positive: + flavors: + - 'image/png' + - 'png_file' + - 'image/jpeg' + - 'jpeg_file' + - 'image/x-ms-bmp' + - 'bmp_file' + - 'image/webp' + negative: + source: + - 'ScanTranscode' + priority: 5 'ScanLzma': - positive: flavors: @@ -214,6 +296,36 @@ strelka: priority: 5 options: tmp_directory: '/dev/shm/' + 'ScanManifest': + - positive: + flavors: + - 'browser_manifest' + priority: 5 + 'ScanMsi': + - positive: + flavors: + - "image/vnd.fpx" + - "application/vnd.ms-msi" + - "application/x-msi" + priority: 5 + options: + tmp_directory: '/dev/shm/' + keys: + - 'Author' + - 
'Characters' + - 'Company' + - 'CreateDate' + - 'LastModifiedBy' + - 'Lines' + - 'ModifyDate' + - 'Pages' + - 'Paragraphs' + - 'RevisionNumber' + - 'Software' + - 'Template' + - 'Title' + - 'TotalEditTime' + - 'Words' 'ScanOcr': - positive: flavors: @@ -236,6 +348,13 @@ strelka: - 'application/msword' - 'olecf_file' priority: 5 + 'ScanOnenote': + - positive: + flavors: + - 'application/onenote' + - 'application/msonenote' + - 'onenote_file' + priority: 5 'ScanPdf': - positive: flavors: @@ -285,6 +404,30 @@ strelka: - 'ProgramArguments' - 'RunAtLoad' - 'StartInterval' + 'ScanPngEof': + - positive: + flavors: + - 'image/png' + - 'png_file' + negative: + source: + - 'ScanTranscode' + priority: 5 + 'ScanQr': + - positive: + flavors: + - 'image/jpeg' + - 'jpeg_file' + - 'image/png' + - 'png_file' + - 'image/tiff' + - 'type_is_tiff' + - 'image/x-ms-bmp' + - 'bmp_file' + - 'image/webp' + priority: 5 + options: + support_inverted: True 'ScanRar': - positive: flavors: @@ -309,6 +452,19 @@ strelka: priority: 5 options: limit: 1000 + 'ScanSevenZip': + - positive: + flavors: + - 'application/x-7z-compressed' + - '_7zip_file' + - "image/vnd.fpx" + - "application/vnd.ms-msi" + - "application/x-msi" + priority: 5 + options: + scanner_timeout: 150 + crack_pws: True + log_pws: True 'ScanSwf': - positive: flavors: @@ -351,6 +507,7 @@ strelka: flavors: - 'vb_file' - 'vbscript' + - 'hta_file' priority: 5 'ScanVba': - positive: @@ -362,6 +519,20 @@ strelka: priority: 5 options: analyze_macros: True + 'ScanVhd': + - positive: + flavors: + - 'application/x-vhd' + - 'vhd_file' + - 'vhdx_file' + priority: 5 + options: + limit: 100 + 'ScanVsto': + - positive: + flavors: + - 'vsto_file' + priority: 5 'ScanX509': - positive: flavors: @@ -391,6 +562,12 @@ strelka: priority: 5 options: location: '/etc/yara/' + compiled: + enabled: False + filename: "rules.compiled" + store_offset: True + offset_meta_key: "StrelkaHexDump" + offset_padding: 32 'ScanZip': - positive: flavors: @@ -530,6 +707,20 @@ 
strelka: ttl: 1h response: log: "/var/log/strelka/strelka.log" + broker: + bootstrap: "full broker here" + protocol: "protocol here" + certlocation: "path to cert location" + keylocation: "path to key location" + calocation: "path to target ca bundle" + topic: "topic name here" + s3redundancy: "Boolean to pipe logs to S3 if kafka connection interrupted" + s3: + accesskey: "S3 Access Key" + secretkey: "S3 Secret Key" + bucketName: "S3 bucket name" + region: "Region that the S3 Bucket resides in" + endpoint: "Endpoint that the S3 bucket refers to" manager: enabled: False config: From 72319e33db33aea5a936260994d05bab04a906ea Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Jan 2024 12:38:09 -0500 Subject: [PATCH 007/777] Avoid leak test triggering --- salt/strelka/defaults.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index 38c72138a..f338ce3ff 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -710,15 +710,15 @@ strelka: broker: bootstrap: "full broker here" protocol: "protocol here" - certlocation: "path to cert location" - keylocation: "path to key location" - calocation: "path to target ca bundle" + certlocation: "path" + keylocation: "path" + calocation: "path" topic: "topic name here" s3redundancy: "Boolean to pipe logs to S3 if kafka connection interrupted" s3: - accesskey: "S3 Access Key" - secretkey: "S3 Secret Key" - bucketName: "S3 bucket name" + accesskey: "abcd" + secretkey: "abcd" + bucketName: "bucket name" region: "Region that the S3 Bucket resides in" endpoint: "Endpoint that the S3 bucket refers to" manager: From 63ba97306c90e4d5128d271f24b4f82d66077633 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Jan 2024 13:05:58 -0500 Subject: [PATCH 008/777] Exclude Strelka defaults --- .github/.gitleaks.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/.gitleaks.toml b/.github/.gitleaks.toml 
index 024b8ce51..cbf54d77c 100644 --- a/.github/.gitleaks.toml +++ b/.github/.gitleaks.toml @@ -541,6 +541,6 @@ paths = [ '''gitleaks.toml''', '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', '''(go.mod|go.sum)$''', - - '''salt/nginx/files/enterprise-attack.json''' + '''salt/nginx/files/enterprise-attack.json''', + '''salt/strelka/defaults.yaml''' ] From 0ccdfcb07cffa3efacaa0384a9993ad533ba322a Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Jan 2024 13:11:43 -0500 Subject: [PATCH 009/777] Exclude only offset_meta_key --- .github/.gitleaks.toml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/.gitleaks.toml b/.github/.gitleaks.toml index cbf54d77c..e2cb0036f 100644 --- a/.github/.gitleaks.toml +++ b/.github/.gitleaks.toml @@ -536,11 +536,10 @@ secretGroup = 4 [allowlist] description = "global allow lists" -regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*'''] +regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''offset_meta_key'''] paths = [ '''gitleaks.toml''', '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', '''(go.mod|go.sum)$''', - '''salt/nginx/files/enterprise-attack.json''', - '''salt/strelka/defaults.yaml''' + '''salt/nginx/files/enterprise-attack.json''' ] From 0cb36bb0aa8c3d1c9300ef3562a6eaef4bc0801e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Jan 2024 13:39:59 -0500 Subject: [PATCH 010/777] Exclude StrelkaHexDump and PLACEHOLDER values --- .github/.gitleaks.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/.gitleaks.toml b/.github/.gitleaks.toml index e2cb0036f..cec03cc5c 100644 --- a/.github/.gitleaks.toml +++ b/.github/.gitleaks.toml @@ -536,7 +536,7 @@ secretGroup = 4 [allowlist] description = "global allow lists" -regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''offset_meta_key'''] +regexes = ['''219-09-9999''', 
'''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*'''] paths = [ '''gitleaks.toml''', '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', From 1698d95efe660ac057d2616ce67a7dc163d9b3ba Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Jan 2024 13:45:26 -0500 Subject: [PATCH 011/777] Use PLACEHOLDER for key values --- salt/strelka/defaults.yaml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index f338ce3ff..2183c0152 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -708,19 +708,19 @@ strelka: response: log: "/var/log/strelka/strelka.log" broker: - bootstrap: "full broker here" - protocol: "protocol here" - certlocation: "path" - keylocation: "path" - calocation: "path" - topic: "topic name here" - s3redundancy: "Boolean to pipe logs to S3 if kafka connection interrupted" + bootstrap: "PLACEHOLDER" + protocol: "PLACEHOLDER" + certlocation: "PLACEHOLDER" + keylocation: "PLACEHOLDER" + calocation: "PLACEHOLDER" + topic: "PLACEHOLDER" + s3redundancy: "PLACEHOLDER - This should be a boolean value" s3: - accesskey: "abcd" - secretkey: "abcd" - bucketName: "bucket name" - region: "Region that the S3 Bucket resides in" - endpoint: "Endpoint that the S3 bucket refers to" + accesskey: "PLACEHOLDER" + secretkey: "PLACEHOLDER" + bucketName: "PLACEHOLDER' + region: "PLACEHOLDER" + endpoint: "PLACEHOLDER" manager: enabled: False config: From 4d7af21dd5cc007285d8566b15c22d87f4328725 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Jan 2024 13:55:37 -0500 Subject: [PATCH 012/777] Fix quote --- salt/strelka/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index 2183c0152..da259fa14 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -718,7 +718,7 @@ strelka: s3: 
accesskey: "PLACEHOLDER" secretkey: "PLACEHOLDER" - bucketName: "PLACEHOLDER' + bucketName: "PLACEHOLDER" region: "PLACEHOLDER" endpoint: "PLACEHOLDER" manager: From d23d3670589c27fb19d13e77c5cbe24c206953e5 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 24 Jan 2024 15:08:38 +0000 Subject: [PATCH 013/777] Make scan.pe.flags a string --- salt/elasticsearch/files/ingest/strelka.file | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/strelka.file b/salt/elasticsearch/files/ingest/strelka.file index a74a7c622..d55e19350 100644 --- a/salt/elasticsearch/files/ingest/strelka.file +++ b/salt/elasticsearch/files/ingest/strelka.file @@ -67,7 +67,8 @@ { "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } }, { "set": { "field": "observer.name", "value": "{{agent.name}}" }}, { "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }}, - { "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } }, - { "pipeline": { "name": "common" } } + { "convert" : { "field" : "scan.pe.flags","type": "string", "ignore_missing":true }}, + { "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } }, + { "pipeline": { "name": "common" } } ] } From 8426aad56de749838a152218ff542c995eca0f77 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 24 Jan 2024 15:10:42 +0000 Subject: [PATCH 014/777] Text mapping for scan.pe.flags --- .../templates/component/so/so-scan-mappings.json | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/elasticsearch/templates/component/so/so-scan-mappings.json b/salt/elasticsearch/templates/component/so/so-scan-mappings.json index 008a6ab10..2d11acba0 100644 --- a/salt/elasticsearch/templates/component/so/so-scan-mappings.json +++ b/salt/elasticsearch/templates/component/so/so-scan-mappings.json @@ -14,16 
+14,19 @@ }, "pe": { "properties": { - "sections": { + "flags": { + "type": "text" + }, + "image_version": { + "type": "float" + }, + "sections": { "properties": { "entropy": { "type": "float" } } - }, - "image_version": { - "type": "float" - } + } } }, "elf": { From 9f17bd2255c1c2d7098c49bf3f632916b1578fa9 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 24 Jan 2024 11:17:32 -0500 Subject: [PATCH 015/777] lks/fps --- salt/common/tools/sbin/so-common | 15 +++++++ salt/common/tools/sbin/so-common-status-check | 44 +++++++++++-------- salt/kratos/map.jinja | 2 +- salt/manager/tools/sbin/so-repo-sync | 6 +-- salt/manager/tools/sbin/so-user | 2 +- salt/stig/enabled.sls | 2 +- salt/stig/schedule.sls | 2 +- salt/telegraf/scripts/features.sh | 8 ++-- 8 files changed, 49 insertions(+), 32 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index e09d2c8ae..37adcef99 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -366,6 +366,13 @@ is_feature_enabled() { return 1 } +read_feat() { + if [ -f /opt/so/log/sostatus/lks_enabled ]; then + lic_id=$(cat /opt/so/saltstack/local/pillar/soc/license.sls | grep license_id: | awk '{print $2}') + echo "$lic_id/$(cat /opt/so/log/sostatus/lks_enabled)/$(cat /opt/so/log/sostatus/fps_enabled)" + fi +} + require_manager() { if is_manager_node; then echo "This is a manager, so we can proceed." 
@@ -559,6 +566,14 @@ status () { printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n" } +sync_options() { + set_version + set_os + salt_minion_count + + echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)" +} + systemctl_func() { local action=$1 local echo_action=$1 diff --git a/salt/common/tools/sbin/so-common-status-check b/salt/common/tools/sbin/so-common-status-check index 39e0c16a7..d713ba6c6 100644 --- a/salt/common/tools/sbin/so-common-status-check +++ b/salt/common/tools/sbin/so-common-status-check @@ -37,23 +37,28 @@ def check_needs_restarted(): with open(outfile, 'w') as f: f.write(val) -def check_for_fips(): - fips = 0 +def check_for_fps(): + feat = 'fps' + feat_full = feat.replace('ps', 'ips') + fps = 0 try: - result = subprocess.run(['fips-mode-setup', '--is-enabled'], stdout=subprocess.PIPE) + result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE) if result.returncode == 0: - fips = 1 + fps = 1 except FileNotFoundError: - with open('/proc/sys/crypto/fips_enabled', 'r') as f: + fn = '/proc/sys/crypto/' + feat_full + '_enabled' + with open(fn, 'r') as f: contents = f.read() if '1' in contents: - fips = 1 + fps = 1 - with open('/opt/so/log/sostatus/fips_enabled', 'w') as f: - f.write(str(fips)) + with open('/opt/so/log/sostatus/lks_enabled', 'w') as f: + f.write(str(fps)) -def check_for_luks(): - luks = 0 +def check_for_lks(): + feat = 'Lks' + feat_full = feat.replace('ks', 'uks') + lks = 0 result = subprocess.run(['lsblk', '-p', '-J'], check=True, stdout=subprocess.PIPE) data = json.loads(result.stdout) for device in data['blockdevices']: @@ -61,17 +66,18 @@ def check_for_luks(): for gc in device['children']: if 'children' in gc: try: - result = subprocess.run(['cryptsetup', 'isLuks', gc['name']], stdout=subprocess.PIPE) + arg = 'is' + feat_full + result = subprocess.run(['cryptsetup', arg, 
gc['name']], stdout=subprocess.PIPE) if result.returncode == 0: - luks = 1 + lks = 1 except FileNotFoundError: for ggc in gc['children']: if 'crypt' in ggc['type']: - luks = 1 - if luks: + lks = 1 + if lks: break - with open('/opt/so/log/sostatus/luks_enabled', 'w') as f: - f.write(str(luks)) + with open('/opt/so/log/sostatus/fps_enabled', 'w') as f: + f.write(str(lks)) def fail(msg): print(msg, file=sys.stderr) @@ -84,9 +90,9 @@ def main(): # Ensure that umask is 0022 so that files created by this script have rw-r-r permissions org_umask = os.umask(0o022) check_needs_restarted() - check_for_fips() - check_for_luks() - # Restore umask to whatever value was set before this script was run. STIG sets to 0077 rw--- + check_for_fps() + check_for_lks() + # Restore umask to whatever value was set before this script was run. SXIG sets to 0077 rw--- os.umask(org_umask) if __name__ == "__main__": diff --git a/salt/kratos/map.jinja b/salt/kratos/map.jinja index a603d813a..89112a1f0 100644 --- a/salt/kratos/map.jinja +++ b/salt/kratos/map.jinja @@ -21,7 +21,7 @@ {% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %} -{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %} +{% if KRATOSMERGED.oidc.enabled and 'odc' in salt['pillar.get']('features') %} {% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %} {% endif %} diff --git a/salt/manager/tools/sbin/so-repo-sync b/salt/manager/tools/sbin/so-repo-sync index 84384fcdf..a0393a36b 100644 --- a/salt/manager/tools/sbin/so-repo-sync +++ b/salt/manager/tools/sbin/so-repo-sync @@ -7,12 +7,8 @@ NOROOT=1 . 
/usr/sbin/so-common -set_version -set_os -salt_minion_count - set -e -curl --retry 5 --retry-delay 60 -A "reposync/$VERSION/$OS/$(uname -r)/$MINIONCOUNT" https://sigs.securityonion.net/checkup --output /tmp/checkup +curl --retry 5 --retry-delay 60 -A "reposync/$(sync_options)" https://sigs.securityonion.net/checkup --output /tmp/checkup dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/ createrepo /nsm/repo diff --git a/salt/manager/tools/sbin/so-user b/salt/manager/tools/sbin/so-user index d597cdacb..69b4fdb50 100755 --- a/salt/manager/tools/sbin/so-user +++ b/salt/manager/tools/sbin/so-user @@ -347,7 +347,7 @@ function syncElastic() { [[ $? != 0 ]] && fail "Unable to read credential hashes from database" user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password') - if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then + if lookup_salt_value "features" "" "pillar" | grep -x odc; then # generate random placeholder salt/hash for users without passwords random_crypt=$(get_random_value 53) user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/") diff --git a/salt/stig/enabled.sls b/salt/stig/enabled.sls index 5c4b6851b..3d8f15ff6 100644 --- a/salt/stig/enabled.sls +++ b/salt/stig/enabled.sls @@ -12,7 +12,7 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states and GLOBALS.os == 'OEL' %} -{% if 'stig' in salt['pillar.get']('features', []) %} +{% if 'stg' in salt['pillar.get']('features', []) %} {% set OSCAP_PROFILE_NAME = 'xccdf_org.ssgproject.content_profile_stig' %} {% set OSCAP_PROFILE_LOCATION = '/opt/so/conf/stig/sos-oscap.xml' %} {% set OSCAP_OUTPUT_DIR = '/opt/so/log/stig' %} diff --git a/salt/stig/schedule.sls b/salt/stig/schedule.sls index 9f354662d..94aaf4e2d 100644 --- 
a/salt/stig/schedule.sls +++ b/salt/stig/schedule.sls @@ -4,7 +4,7 @@ # Elastic License 2.0. {% from 'stig/map.jinja' import STIGMERGED %} -{% if 'stig' in salt['pillar.get']('features', []) %} +{% if 'stg' in salt['pillar.get']('features', []) %} stig_remediate_schedule: schedule.present: - function: state.apply diff --git a/salt/telegraf/scripts/features.sh b/salt/telegraf/scripts/features.sh index eb600ccdf..7c4fe6f52 100644 --- a/salt/telegraf/scripts/features.sh +++ b/salt/telegraf/scripts/features.sh @@ -7,11 +7,11 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - FIPS_ENABLED=$(cat /var/log/sostatus/fips_enabled) - LUKS_ENABLED=$(cat /var/log/sostatus/luks_enabled) + FPS_ENABLED=$(cat /var/log/sostatus/fps_enabled) + LKS_ENABLED=$(cat /var/log/sostatus/lks_enabled) - echo "features fips=$FIPS_ENABLED" - echo "features luks=$LUKS_ENABLED" + echo "features fps=$FPS_ENABLED" + echo "features lks=$LKS_ENABLED" fi exit 0 From 5f1c76f6ec0f30213a73213b82f639c0848c0e96 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 25 Jan 2024 09:46:25 -0500 Subject: [PATCH 016/777] endpoint.diagnostic.collection --- salt/elasticsearch/defaults.yaml | 56 ++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index e35cec326..ce32f4634 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -3803,6 +3803,62 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-endpoint_x_diagnostic_x_collection: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.diagnostic.collection@custom + - logs-endpoint.diagnostic.collection@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.diagnostic.collection-* + priority: 501 + template: + settings: + index: + lifecycle: + name: 
so-logs-endpoint.diagnostic.collection-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-endpoint_x_events_x_api: index_sorting: false index_template: From 762a3bea177a021dc97a1dcf9c1771d9c8bcc359 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 25 Jan 2024 09:59:26 -0500 Subject: [PATCH 017/777] Defaults and Annotations --- salt/suricata/defaults.yaml | 10 +++++++ salt/suricata/soc_suricata.yaml | 50 +++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index e9e39d40a..4961ae50a 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -128,6 +128,16 @@ suricata: enabled: "no" pcap-log: enabled: "no" + compression: "none" + lz4-checksum: "no" + lz4-level: 8 + filename: "%n/so-pcap.%t" + limit: "1000mb" + mode: "multi" + max-files: 10 + use-stream-depth: "no" + conditional: "all" + dir: "/nsm/pcap" alert-debug: enabled: "no" alert-prelude: diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 30f277c0a..58a2273b9 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -153,6 +153,53 @@ suricata: header: description: Header name where the actual IP address will be reported. helpLink: suricata.html + pcap-log: + enabled: + description: Enable Suricata to collect PCAP. + helpLink: suricata.html + compression: + description: Enable compression of Suricata PCAP. 
Currently unsupported + advanced: True + readonly: True + helpLink: suricata.html + lz4-checksum: + description: Enable PCAP lz4 checksum. Currently unsupported + advanced: True + readonly: True + helpLink: suricata.html + lz4-level: + description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. Currently unsupported + advanced: True + readonly: True + helpLink: suricata.html + filename: + description: Filename output for Suricata PCAP. + advanced: True + readonly: True + helpLink: suricata.html + limit: + description: File size limit per thread. To determine max PCAP size multiple threads x max-files x limit. + helpLink: suricata.html + mode: + description: Suricata PCAP mode. Currenlty only multi is supported. + advanced: True + readonly: True + helpLink: suricata.html + max-files: + description: Max PCAP files per thread. To determine max PCAP size multiple threads x max-files x limit. + helpLink: suricata.html + use-stream-depth: + description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. + advanced: True + helpLink: suricata.html + conditional: + description: Set to "all" to capture PCAP for all flows. Set to "alert" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. + helpLink: suricata.html + dir: + description: Parent directory to store PCAP. + advanced: True + readonly: True + helpLink: suricata.html asn1-max-frames: description: Maximum nuber of asn1 frames to decode. helpLink: suricata.html @@ -209,6 +256,9 @@ suricata: memcap: description: Can be specified in kb,mb,gb. helpLink: suricata.html + depth: + description: Controls how far into a stream that reassembly is done. + helpLink: suricata.html host: hash-size: description: Hash size in bytes. 
From cd54d4becbba70ad80299e2127d2eabfb12b2ae3 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 25 Jan 2024 13:57:02 -0500 Subject: [PATCH 018/777] Fix indent --- salt/elasticsearch/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index ce32f4634..ce1bfb08d 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -3803,7 +3803,7 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - so-logs-endpoint_x_diagnostic_x_collection: + so-logs-endpoint_x_diagnostic_x_collection: index_sorting: false index_template: composed_of: From 12ab6338db02a35615a173093509eb7f679d30d3 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 25 Jan 2024 20:16:52 +0000 Subject: [PATCH 019/777] Add diagnostic --- .../logs-elastic_agent@package.json | 733 +++++++++--------- ...endpoint.diagnostic.collection@custom.json | 12 + ...ndpoint.diagnostic.collection@package.json | 132 ++++ 3 files changed, 511 insertions(+), 366 deletions(-) create mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@custom.json create mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@package.json diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json index 2390705f3..7bcc34de9 100644 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json +++ b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json @@ -1,382 +1,383 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent-1.13.1", - "mapping": { - "total_fields": { - "limit": "10000" +{ + "template": { + "settings": { + "index": { + "lifecycle": { 
+ "name": "logs" + }, + "codec": "best_compression", + "default_pipeline": "logs-elastic_agent-1.13.1", + "mapping": { + "total_fields": { + "limit": "10000" + } + }, + "query": { + "default_field": [ + "cloud.account.id", + "cloud.availability_zone", + "cloud.instance.id", + "cloud.instance.name", + "cloud.machine.type", + "cloud.provider", + "cloud.region", + "cloud.project.id", + "cloud.image.id", + "container.id", + "container.image.name", + "container.name", + "host.architecture", + "host.hostname", + "host.id", + "host.mac", + "host.name", + "host.os.family", + "host.os.kernel", + "host.os.name", + "host.os.platform", + "host.os.version", + "host.os.build", + "host.os.codename", + "host.type", + "ecs.version", + "agent.build.original", + "agent.ephemeral_id", + "agent.id", + "agent.name", + "agent.type", + "agent.version", + "log.level", + "message", + "elastic_agent.id", + "elastic_agent.process", + "elastic_agent.version", + "component.id", + "component.type", + "component.binary", + "component.state", + "component.old_state", + "unit.id", + "unit.type", + "unit.state", + "unit.old_state" + ] + } + } + }, + "mappings": { + "dynamic": false, + "dynamic_templates": [ + { + "container.labels": { + "path_match": "container.labels.*", + "mapping": { + "type": "keyword" + }, + "match_mapping_type": "string" + } + } + ], + "properties": { + "container": { + "properties": { + "image": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - 
"host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version", - "component.id", - "component.type", - "component.binary", - "component.state", - "component.old_state", - "unit.id", - "unit.type", - "unit.state", - "unit.old_state" - ] } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" + } + }, + "agent": { + "properties": { + "build": { + "properties": { + "original": { + "ignore_above": 1024, + "type": "keyword" } } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "ephemeral_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "log": { + "properties": { + "level": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "elastic_agent": { + "properties": { + "process": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "snapshot": { + "type": "boolean" + } + } + }, + "message": { + "type": "text" + }, + "cloud": { + "properties": { + "availability_zone": { + "ignore_above": 
1024, + "type": "keyword" + }, + "image": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } + } + }, + "instance": { + "properties": { + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } + } + }, + "provider": { + "ignore_above": 1024, + "type": "keyword" + }, + "machine": { + "properties": { + "type": { + "ignore_above": 1024, + "type": "keyword" } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } + } + }, + "project": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" } - }, - "message": { - "type": "text" - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 
1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "component": { - "properties": { - "binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "old_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "wildcard" - }, - "state": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "unit": { - "properties": { - "old_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "wildcard" - }, - "state": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } + } + }, + "region": { + "ignore_above": 1024, + "type": "keyword" + }, + "account": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" } } } } }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true + "component": { + "properties": { + "binary": { + "ignore_above": 1024, + "type": "keyword" + }, + "old_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "wildcard" + }, + "state": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "unit": { + "properties": { + "old_state": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "wildcard" + }, + "state": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "@timestamp": { + "type": "date" + }, + "ecs": { + "properties": { + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "data_stream": { + "properties": { + "namespace": { + "type": "constant_keyword" + }, + "type": { + "type": "constant_keyword" + }, + "dataset": { + "type": "constant_keyword" + } + } + }, + "host": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "os": { + "properties": { + "build": { + "ignore_above": 1024, + "type": "keyword" + }, + "kernel": { + "ignore_above": 1024, + "type": "keyword" + }, + "codename": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "text": { + 
"type": "text" + } + } + }, + "family": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + }, + "platform": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "domain": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "containerized": { + "type": "boolean" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "mac": { + "ignore_above": 1024, + "type": "keyword" + }, + "architecture": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "event": { + "properties": { + "dataset": { + "type": "constant_keyword" + } + } } } + } + }, + "_meta": { + "package": { + "name": "elastic_agent" + }, + "managed_by": "fleet", + "managed": true + } +} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@custom.json new file mode 100644 index 000000000..1bf9313a9 --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@custom.json @@ -0,0 +1,12 @@ +{ + "template": { + "settings": {} + }, + "_meta": { + "package": { + "name": "endpoint" + }, + "managed_by": "fleet", + "managed": true + } +} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@package.json new file mode 100644 index 000000000..bf60f2543 --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/logs-endpoint.diagnostic.collection@package.json @@ -0,0 +1,132 @@ +{ + "template": { + "settings": { + "index": { + "lifecycle": { + "name": "logs-endpoint.collection-diagnostic" + }, + "codec": "best_compression", + 
"default_pipeline": "logs-endpoint.diagnostic.collection-8.10.2", + "mapping": { + "total_fields": { + "limit": "10000" + }, + "ignore_malformed": "true" + }, + "query": { + "default_field": [ + "ecs.version", + "event.action", + "event.category", + "event.code", + "event.dataset", + "event.hash", + "event.id", + "event.kind", + "event.module", + "event.outcome", + "event.provider", + "event.type" + ] + } + } + }, + "mappings": { + "dynamic": false, + "properties": { + "@timestamp": { + "ignore_malformed": false, + "type": "date" + }, + "ecs": { + "properties": { + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "data_stream": { + "properties": { + "namespace": { + "type": "constant_keyword" + }, + "type": { + "type": "constant_keyword" + }, + "dataset": { + "type": "constant_keyword" + } + } + }, + "event": { + "properties": { + "severity": { + "type": "long" + }, + "code": { + "ignore_above": 1024, + "type": "keyword" + }, + "created": { + "type": "date" + }, + "kind": { + "ignore_above": 1024, + "type": "keyword" + }, + "module": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "sequence": { + "type": "long" + }, + "ingested": { + "type": "date" + }, + "provider": { + "ignore_above": 1024, + "type": "keyword" + }, + "action": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "category": { + "ignore_above": 1024, + "type": "keyword" + }, + "dataset": { + "ignore_above": 1024, + "type": "keyword" + }, + "hash": { + "ignore_above": 1024, + "type": "keyword" + }, + "outcome": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + } + }, + "_meta": { + "package": { + "name": "endpoint" + }, + "managed_by": "fleet", + "managed": true + } +} From cd6e387bcbc3a21bb8bd2121221496345ec9ec6e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:15:53 -0500 
Subject: [PATCH 020/777] remove --local from soup common.soup_scripts update. Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0fc4f75d8..e66d15a56 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -776,7 +776,7 @@ verify_latest_update_script() { cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/ - salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + salt-call state.apply common.soup_scripts queue=True -linfo echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." exit 0 From 2e026b637d046bc8444bb5e4db9843f87709ec00 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:36:33 -0500 Subject: [PATCH 021/777] Update soup to retry modified salt command on failure to update soup scripts. 
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index e66d15a56..948fc10b1 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -372,6 +372,17 @@ enable_highstate() { echo "" } +get_soup_script_hashes() { + CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}') + GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}') + CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}') + GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}') + CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}') + GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}') + CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}') + GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}') +} + highstate() { # Run a highstate. salt-call state.highstate -l info queue=True @@ -758,16 +769,7 @@ upgrade_salt() { } verify_latest_update_script() { - # Check to see if the update scripts match. If not run the new one. 
- CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}') - GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}') - CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}') - GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}') - CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}') - GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}') - CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}') - GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}') - + get_soup_script_hashes if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then echo "This version of the soup script is up to date. Proceeding." else @@ -776,7 +778,13 @@ verify_latest_update_script() { cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/ - salt-call state.apply common.soup_scripts queue=True -linfo + salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + # Verify that soup scripts updated as expected + get_soup_script_hashes + if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then + echo "There was a problem updating soup scripts.. Trying to rerun script update" + salt-call state.apply common.soup_scripts queue=True -linfo + else echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." 
exit 0 From 91c7b8144d28e5ffb01b00c017bcbd429afd50f2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 26 Jan 2024 15:43:42 -0500 Subject: [PATCH 022/777] soup logic Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 948fc10b1..2e9c423ff 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -782,9 +782,11 @@ verify_latest_update_script() { # Verify that soup scripts updated as expected get_soup_script_hashes if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then + echo "Succesfully updated soup scripts." + else echo "There was a problem updating soup scripts.. Trying to rerun script update" salt-call state.apply common.soup_scripts queue=True -linfo - else + fi echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." exit 0 From c4301d7cc1deeb426604b248de24096243c4a248 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 26 Jan 2024 15:51:06 -0500 Subject: [PATCH 023/777] Soup script update locations Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 2e9c423ff..86cb709dd 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -774,10 +774,10 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." 
- cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/manager/tools/sbin/ cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ - cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/manager/tools/sbin/ salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local # Verify that soup scripts updated as expected get_soup_script_hashes @@ -791,8 +791,8 @@ verify_latest_update_script() { echo "The soup script has been modified. Please run soup again to continue the upgrade." exit 0 fi -} +} # Keeping this block in case we need to do a hotfix that requires salt update apply_hotfix() { if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then From cfc33b1a34acf93f1ee6d93c3bbda4c98ee0777c Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Sun, 28 Jan 2024 10:12:25 -0500 Subject: [PATCH 024/777] Sync Elastic Agent Artifacts --- files/salt/master/master | 3 ++- salt/elasticfleet/enabled.sls | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/files/salt/master/master b/files/salt/master/master index b93fa93de..e9b36868c 100644 --- a/files/salt/master/master +++ b/files/salt/master/master @@ -41,7 +41,8 @@ file_roots: base: - /opt/so/saltstack/local/salt - /opt/so/saltstack/default/salt - + elasticartifacts: + - /nsm/elastic-fleet/artifacts # The master_roots setting configures a master-only copy of the file_roots dictionary, # used by the state compiler. 
diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index fef85d24c..bd8ab51c0 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -41,6 +41,15 @@ so-elastic-fleet-auto-configure-elasticsearch-urls: - retry: True {% endif %} +# Sync Elastic Agent artifacts to Fleet Node +{% if grains.role in ['so-fleet'] %} +elasticagent_syncartifacts: + file.recurse: + - name: /nsm/elastic-fleet/artifacts/beats + - source: salt://beats?saltenv=elasticartifacts + +{% endif %} + {% if SERVICETOKEN != '' %} so-elastic-fleet: docker_container.running: From 1847e5c3c0093751c6826df33c2b88a70933ea3c Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Sun, 28 Jan 2024 11:37:18 -0500 Subject: [PATCH 025/777] Enable nginx on Fleet Node --- salt/allowed_states.map.jinja | 1 + salt/nginx/etc/nginx.conf | 20 ++++++++++++++++++++ salt/top.sls | 1 + 3 files changed, 22 insertions(+) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index d27f51ede..3ead8b26e 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -180,6 +180,7 @@ 'telegraf', 'firewall', 'logstash', + 'nginx', 'healthcheck', 'schedule', 'elasticfleet', diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index d5981be77..236f8da7f 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -39,6 +39,26 @@ http { include /etc/nginx/conf.d/*.conf; + {%- if role in ['fleet'] %} + + server { + listen 8443; + server_name {{ GLOBALS.hostname }}; + root /opt/socore/html; + location /artifacts/ { + try_files $uri =206; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + + {%- endif %} + {%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %} server { diff --git 
a/salt/top.sls b/salt/top.sls index f8979956e..16b355476 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -264,6 +264,7 @@ base: - telegraf - firewall - logstash + - nginx - elasticfleet - elasticfleet.install_agent_grid - schedule From afa98fa147f9b66a97692a35917f6dc766b3b93f Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Sun, 28 Jan 2024 14:20:52 -0500 Subject: [PATCH 026/777] update artifacts URL automatically --- salt/elasticfleet/enabled.sls | 7 +- .../so-elastic-fleet-artifacts-url-update | 102 ++++++++++++++++++ salt/firewall/containers.map.jinja | 1 + salt/nginx/enabled.sls | 5 + 4 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index bd8ab51c0..dca7f479f 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -33,12 +33,17 @@ so-elastic-fleet-auto-configure-server-urls: - retry: True {% endif %} -# Automatically update Fleet Server Elasticsearch URLs +# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs {% if grains.role not in ['so-fleet'] %} so-elastic-fleet-auto-configure-elasticsearch-urls: cmd.run: - name: /usr/sbin/so-elastic-fleet-es-url-update - retry: True + +so-elastic-fleet-auto-configure-elasticsearch-urls: + cmd.run: + - name: /usr/sbin/so-elastic-fleet-artifacts-url-update + - retry: True {% endif %} # Sync Elastic Agent artifacts to Fleet Node diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update new file mode 100644 index 000000000..685db392f --- /dev/null +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update @@ -0,0 +1,102 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. 
Licensed under the Elastic License 2.0; you may not use +# this file except in compliance with the Elastic License 2.0. +{% from 'vars/globals.map.jinja' import GLOBALS %} + +. /usr/sbin/so-common + +# Only run on Managers +if ! is_manager_node; then + printf "Not a Manager Node... Exiting" + exit 0 +fi + +########## +# Set Elastic Agent Artifact Registry URL + + + + +function update_es_urls() { + +# For each element in NEWLIST, create a new entry + +JSON_STRING=$( jq -n \ + --arg NAME "FleetServer_{{ GLOBALS.hostname }}" \ + --arg URL "http://{{ GLOBALS.url_base }}:8443/artifacts/" \ + '{"name":$NAME,"host":$URL,"is_default":true}' + ) + +curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" + +######### + + # Generate updated JSON payload +{% if grains.role not in ['so-import', 'so-eval'] %} + JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"so-manager_elasticsearch","type":"elasticsearch","hosts": $UPDATEDLIST,"config_yaml":""}') +{%- else %} + JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"so-manager_elasticsearch","type":"elasticsearch","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}') +{%- endif %} + # Update Fleet Elasticsearch URLs + curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +} + +#START HERE + +# Get current list of Artifact URLs +#RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch') +RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources') + + +# Check to make sure that the server responded with good data - else, bail from script +CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") +if [ 
"$CHECKSUM" != "1" ]; then + printf "Failed to query for current Elastic Agent Artifact URLs..." + exit 1 +fi + +# Get the current list of Elastic Agent Artifact URLs & hash them +CURRENT_LIST=$(jq -c -r '.items[].host' <<< "$RAW_JSON") +CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + + +# Create array & add initial elements +if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then + NEW_LIST=("http://{{ GLOBALS.url_base }}:8443/artifacts/") +else + NEW_LIST=("http://{{ GLOBALS.url_base }}:8443/artifacts/" "http://{{ GLOBALS.hostname }}:8443/artifacts/") +fi + +# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) +LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') + +# Query for Fleet Nodes & add them to the list (Hostname) +if grep -q "fleet" <<< $LOGSTASHNODES; then + readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES) + for NODE in "${FLEETNODES[@]}" + do + NEW_LIST+=("http://$NODE:8443/artifacts/") + done +fi + +# Sort & hash the new list of Fleet Elasticsearch URLs +NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") +NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') + +# Compare the current & new list of URLs - if different, update the Fleet Elasticsearch URLs +if [ "$1" = "--force" ]; then + printf "\nUpdating List, since --force was specified.\n" + printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" + update_es_urls + exit 0 +fi + +if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then + printf "\nHashes match - no update needed.\n" + printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" + exit 0 +else + printf "\nHashes don't match - update needed.\n" + printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" + #update_es_urls +fi diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 0ba2389e9..99a3bd5d0 100644 --- 
a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -95,6 +95,7 @@ {% set NODE_CONTAINERS = [ 'so-elastic-fleet', 'so-logstash', + 'so-nginx' ] %} {% elif GLOBALS.role == 'so-sensor' %} diff --git a/salt/nginx/enabled.sls b/salt/nginx/enabled.sls index dda475655..eca9c237a 100644 --- a/salt/nginx/enabled.sls +++ b/salt/nginx/enabled.sls @@ -14,6 +14,9 @@ include: - nginx.config - nginx.sostatus + +{% if grains.role not in ['so-fleet'] %} + {# if the user has selected to replace the crt and key in the ui #} {% if NGINXMERGED.ssl.replace_cert %} @@ -88,6 +91,8 @@ make-rule-dir-nginx: - recurse: - user - group + +{% endif %} so-nginx: docker_container.running: From 7c08b348aafb9ee9a842e67356e4b09096217fbb Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Jan 2024 10:16:34 -0500 Subject: [PATCH 027/777] Add comment for soup update w/ STIGs enabled Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 86cb709dd..a250116d1 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -784,6 +784,8 @@ verify_latest_update_script() { if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then echo "Succesfully updated soup scripts." else + # When STIGs are enabled soup scripts will fail to update using --file-root --local. + # After checking that the expected hashes are not present, retry updating soup scripts using salt master. echo "There was a problem updating soup scripts.. 
Trying to rerun script update" salt-call state.apply common.soup_scripts queue=True -linfo fi From 0d08bb0a91f817efd7d66a8f818706c9b8afe65d Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 29 Jan 2024 11:37:28 -0500 Subject: [PATCH 028/777] Finalize script --- salt/elasticfleet/enabled.sls | 2 +- .../so-elastic-fleet-artifacts-url-update | 129 +++++++----------- 2 files changed, 51 insertions(+), 80 deletions(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index dca7f479f..f5f53c2a5 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -40,7 +40,7 @@ so-elastic-fleet-auto-configure-elasticsearch-urls: - name: /usr/sbin/so-elastic-fleet-es-url-update - retry: True -so-elastic-fleet-auto-configure-elasticsearch-urls: +so-elastic-fleet-auto-configure-artifact-urls: cmd.run: - name: /usr/sbin/so-elastic-fleet-artifacts-url-update - retry: True diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update index 685db392f..bcd3ef7f7 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update @@ -11,92 +11,63 @@ if ! 
is_manager_node; then exit 0 fi -########## -# Set Elastic Agent Artifact Registry URL - - - - -function update_es_urls() { - -# For each element in NEWLIST, create a new entry - -JSON_STRING=$( jq -n \ - --arg NAME "FleetServer_{{ GLOBALS.hostname }}" \ - --arg URL "http://{{ GLOBALS.url_base }}:8443/artifacts/" \ - '{"name":$NAME,"host":$URL,"is_default":true}' - ) - -curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" - -######### - - # Generate updated JSON payload -{% if grains.role not in ['so-import', 'so-eval'] %} - JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"so-manager_elasticsearch","type":"elasticsearch","hosts": $UPDATEDLIST,"config_yaml":""}') -{%- else %} - JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"so-manager_elasticsearch","type":"elasticsearch","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}') -{%- endif %} - # Update Fleet Elasticsearch URLs - curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +# Function to check if an array contains a value +array_contains () { + local array="$1[@]" + local seeking=$2 + local in=1 + for element in "${!array}"; do + if [[ $element == "$seeking" ]]; then + in=0 + break + fi + done + return $in } -#START HERE - -# Get current list of Artifact URLs -#RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch') -RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources') - - -# Check to make sure that the server responded with good data - else, bail from script -CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") -if [ "$CHECKSUM" != "1" ]; then - printf "Failed to 
query for current Elastic Agent Artifact URLs..." - exit 1 -fi - -# Get the current list of Elastic Agent Artifact URLs & hash them -CURRENT_LIST=$(jq -c -r '.items[].host' <<< "$RAW_JSON") -CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') - - -# Create array & add initial elements -if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then - NEW_LIST=("http://{{ GLOBALS.url_base }}:8443/artifacts/") -else - NEW_LIST=("http://{{ GLOBALS.url_base }}:8443/artifacts/" "http://{{ GLOBALS.hostname }}:8443/artifacts/") -fi - # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') +# Initialize an array for new hosts from Fleet Nodes +declare -a NEW_LIST=() + # Query for Fleet Nodes & add them to the list (Hostname) -if grep -q "fleet" <<< $LOGSTASHNODES; then - readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES) - for NODE in "${FLEETNODES[@]}" - do - NEW_LIST+=("http://$NODE:8443/artifacts/") - done +if grep -q "fleet" <<< "$LOGSTASHNODES"; then + readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES") + for NODE in "${FLEETNODES[@]}"; do + NEW_LIST+=("http://$NODE:8443/artifacts/") + done fi -# Sort & hash the new list of Fleet Elasticsearch URLs -NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") -NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') +# Create an array for expected hosts and their names +declare -A expected_hosts=( + ["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}" + ["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts" +) -# Compare the current & new list of URLs - if different, update the Fleet Elasticsearch URLs -if [ "$1" = "--force" ]; then - printf "\nUpdating List, since --force was specified.\n" - printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" 
- update_es_urls - exit 0 -fi +# Merge NEW_LIST into expected_hosts +for host in "${NEW_LIST[@]}"; do + expected_hosts[$host]="FleetServer" +done -if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then - printf "\nHashes match - no update needed.\n" - printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" - exit 0 -else - printf "\nHashes don't match - update needed.\n" - printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" - #update_es_urls -fi +# Fetch the current hosts from the API +current_hosts=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host) + +# Convert current hosts to an array +IFS=$'\n' read -rd '' -a current_hosts_array <<<"$current_hosts" + +# Check each expected host +for host in "${!expected_hosts[@]}"; do + array_contains current_hosts_array "$host" || { + echo "$host (${expected_hosts[$host]}) is missing. Adding it..." + + # Prepare the JSON payload + JSON_STRING=$( jq -n \ + --arg NAME "${expected_hosts[$host]}" \ + --arg URL "$host" \ + '{"name":$NAME,"host":$URL}' ) + + # Create the missing host + curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" + } +done From 1a2245a1ed188be34b2ffbcb6304567c9c5abdee Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 29 Jan 2024 13:44:53 -0500 Subject: [PATCH 029/777] Add so-minion modifications --- salt/manager/tools/sbin/so-minion | 25 +++++++++++++++++++++++++ salt/suricata/soc_suricata.yaml | 2 +- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index d5225cc82..12349b680 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -79,6 +79,30 @@ function getinstallinfo() { source <(echo $INSTALLVARS) } +function pcapspace() { + + local NSMSIZE=$(salt \* disk.usage --out=json 
| jq -r '.[]."/nsm"."1K-blocks" ') + local ROOTSIZE=$(salt \* disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') + + if [[ "$NSMSIZE" == "null" ]]; then + # Looks like there is no dedicated nsm partition. Using root + local SPACESIZE=$ROOTSIZE + else + local SPACESIZE=$NSMSIZE + fi + + local s=$(( $SPACESIZE / 1000000 )) + local s1=$(( $s / 2 )) + local s2=$(( $s1 / $lb_procs )) + + printf '%s\n'\ + "suricata:"\ + " config:"\ + " output:"\ + " pcap-log: $s" >> $PILLARFILE + +} + function testMinion() { # Always run on the host, since this is going to be the manager of a distributed grid, or an eval/standalone. # Distributed managers must run this in order for the sensor nodes to have access to the so-tcpreplay image. @@ -252,6 +276,7 @@ function add_sensor_to_minion() { if [[ $is_pcaplimit ]]; then echo " config:" >> $PILLARFILE echo " diskfreepercentage: 60" >> $PILLARFILE + pcapspace fi echo " " >> $PILLARFILE } diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 58a2273b9..5dddd7442 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -181,7 +181,7 @@ suricata: description: File size limit per thread. To determine max PCAP size multiple threads x max-files x limit. helpLink: suricata.html mode: - description: Suricata PCAP mode. Currenlty only multi is supported. + description: Suricata PCAP mode. Currently only multi is supported. 
advanced: True readonly: True helpLink: suricata.html From 5b05aec96aaa9cbf73294bce24fed2d59f5e4be4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 29 Jan 2024 14:56:51 -0500 Subject: [PATCH 030/777] Target sspecific minion --- salt/manager/tools/sbin/so-minion | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 12349b680..877796620 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -81,8 +81,8 @@ function getinstallinfo() { function pcapspace() { - local NSMSIZE=$(salt \* disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') - local ROOTSIZE=$(salt \* disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') + local NSMSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') + local ROOTSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') if [[ "$NSMSIZE" == "null" ]]; then # Looks like there is no dedicated nsm partition. 
Using root From 0c969312e2ee61eed8e6c0d571e44dc7ee7cdcec Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 29 Jan 2024 15:22:20 -0500 Subject: [PATCH 031/777] Add Globals --- salt/global/defaults.yaml | 2 ++ salt/global/soc_global.yaml | 5 +++++ salt/manager/tools/sbin/so-minion | 3 ++- salt/sensoroni/files/sensoroni.json | 11 ++++++++++- salt/suricata/soc_suricata.yaml | 4 +++- 5 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 salt/global/defaults.yaml diff --git a/salt/global/defaults.yaml b/salt/global/defaults.yaml new file mode 100644 index 000000000..bd7244a58 --- /dev/null +++ b/salt/global/defaults.yaml @@ -0,0 +1,2 @@ +global: + pcapengine: STENO \ No newline at end of file diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index 14d637d50..fc1c09b1c 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -14,6 +14,11 @@ global: regex: ^(ZEEK|SURICATA)$ regexFailureMessage: You must enter either ZEEK or SURICATA. global: True + pcapengine: + description: What engine to use for generating pcap. Options are STENO and SURICATA. + regex: ^(STENO|SURICATA)$ + regexFailureMessage: You must enter either STENO or SURICATA. + global: True ids: description: Which IDS engine to use. Currently only Suricata is supported. 
global: True diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 877796620..4995e1c9d 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -99,7 +99,8 @@ function pcapspace() { "suricata:"\ " config:"\ " output:"\ - " pcap-log: $s" >> $PILLARFILE + " pcap-log:"\ + " max-files: $s" >> $PILLARFILE } diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index 59ce500e3..c5608ba56 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -23,13 +23,22 @@ "importer": {}, "statickeyauth": { "apiKey": "{{ GLOBALS.sensoroni_key }}" -{%- if PCAPMERGED.enabled %} +{%- if PCAPMERGED.enabled %} +{%- if PCAPENGINE.steno %} }, "stenoquery": { "executablePath": "/opt/sensoroni/scripts/stenoquery.sh", "pcapInputPath": "/nsm/pcap", "pcapOutputPath": "/nsm/pcapout" } +{%- elif PCAPENGINE.suri %} + }, + "suriquery": { + "executablePath": "/opt/sensoroni/scripts/suriquery.sh", + "pcapInputPath": "/nsm/suripcap", + "pcapOutputPath": "/nsm/pcapout" + } +{%- endif %} {%- else %} } {%- endif %} diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 5dddd7442..7153eb9a1 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -155,8 +155,10 @@ suricata: helpLink: suricata.html pcap-log: enabled: - description: Enable Suricata to collect PCAP. + description: This value is ignored by SO. pcapengine in globals takes predidence. + readonly: True helpLink: suricata.html + advanced: True compression: description: Enable compression of Suricata PCAP. 
Currently unsupported advanced: True From 88c01a22d6454b318c0c94a1718c93fb69b34500 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 29 Jan 2024 15:27:28 -0500 Subject: [PATCH 032/777] Add annotation logic --- salt/suricata/soc_suricata.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 7153eb9a1..52352d043 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -44,6 +44,7 @@ suricata: set-cpu-affinity: description: Bind(yes) or unbind(no) management and worker threads to a core or range of cores. regex: ^(yes|no)$ + regexFailureMessage: You must enter either yes or no. helpLink: suricata.html cpu-affinity: management-cpu-set: @@ -155,7 +156,7 @@ suricata: helpLink: suricata.html pcap-log: enabled: - description: This value is ignored by SO. pcapengine in globals takes predidence. + description: This value is ignored by SO. pcapengine in globals takes precidence. readonly: True helpLink: suricata.html advanced: True @@ -193,9 +194,13 @@ suricata: use-stream-depth: description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. advanced: True + regex: ^(yes|no)$ + regexFailureMessage: You must enter either yes or no. helpLink: suricata.html conditional: description: Set to "all" to capture PCAP for all flows. Set to "alert" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. + regex: ^(all|alert|tag)$ + regexFailureMessage: You must enter either all, alert or tag. helpLink: suricata.html dir: description: Parent directory to store PCAP. 
From ab551a747ddafe1fd0b602e3f84d8130e9ffe5bc Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 29 Jan 2024 15:44:57 -0500 Subject: [PATCH 033/777] Threads placeholder logic --- salt/suricata/enabled.sls | 3 +++ salt/suricata/pcap.sls | 25 +++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 salt/suricata/pcap.sls diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index ce309e41a..6dce49c8c 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -12,6 +12,9 @@ include: - suricata.config - suricata.sostatus + if blah + - suricata.pcap + endif so-suricata: docker_container.running: diff --git a/salt/suricata/pcap.sls b/salt/suricata/pcap.sls new file mode 100644 index 000000000..f677532f0 --- /dev/null +++ b/salt/suricata/pcap.sls @@ -0,0 +1,25 @@ +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% import_yaml 'suricata/defaults.yaml' as SURICATADEFAULTS %} +{% set SURICATAMERGED = salt['pillar.get']('suricata', SURICATADEFAULTS.suricata, merge=True) %} + +suripcapdir: + file.directory: + - name: /nsm/suripcap + - user: 940 + - group: 939 + - mode: 755 + - makedirs: True + +{{ SURICATAMERGED.config['af-packet'].threads }} + +for thread in afp.threads + +suripcapthreaddir: + file.directory: + - name: /nsm/suripcap/{{thread}} + - user: 940 + - group: 939 + - mode: 755 + - makedirs: True + +endfor \ No newline at end of file From 88d2ddba8bddeeac28fbadf12c826c04a4a61e82 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 29 Jan 2024 15:53:54 -0500 Subject: [PATCH 034/777] add placeholder for telegraf --- salt/telegraf/scripts/oldpcap.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/telegraf/scripts/oldpcap.sh b/salt/telegraf/scripts/oldpcap.sh index bb1be457f..d3f4b9a93 100644 --- a/salt/telegraf/scripts/oldpcap.sh +++ b/salt/telegraf/scripts/oldpcap.sh @@ -5,13 +5,18 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # 
Elastic License 2.0. +{%- if pcap is steno +PCAPLOC=/host/nsm/pcap +{%- else %} +PCAPLOC=/host/nsm/suripcap +{%- endif %} # if this script isn't already running if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then # Get the data - OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'}) + OLDPCAP=$(find $PCAPLOC -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'}) DATE=$(date +%s) AGE=$(($DATE - $OLDPCAP)) From d118ff4728454cb08cd020234b6d858c5edc3656 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 29 Jan 2024 16:54:08 -0500 Subject: [PATCH 035/777] add GLOBALS.pcap_engine --- salt/global/map.jinja | 2 ++ salt/sensoroni/files/sensoroni.json | 6 +++--- salt/suricata/enabled.sls | 4 ++-- salt/suricata/pcap.sls | 13 +++++-------- salt/telegraf/config.sls | 2 ++ salt/telegraf/scripts/oldpcap.sh | 7 +++---- salt/vars/globals.map.jinja | 2 ++ 7 files changed, 19 insertions(+), 17 deletions(-) create mode 100644 salt/global/map.jinja diff --git a/salt/global/map.jinja b/salt/global/map.jinja new file mode 100644 index 000000000..54abb8c79 --- /dev/null +++ b/salt/global/map.jinja @@ -0,0 +1,2 @@ +{% import_yaml 'global/defaults.yaml' as GLOBALDEFAULTS %} +{% set GLOBALMERGED = salt['pillar.get']('global', GLOBALDEFAULTS.global, merge=True) %} diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index c5608ba56..f813dad2f 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -24,21 +24,21 @@ "statickeyauth": { "apiKey": "{{ GLOBALS.sensoroni_key }}" {%- if PCAPMERGED.enabled %} -{%- if PCAPENGINE.steno %} +{%- if GLOBALS.pcap_engine == "STENO" %} }, "stenoquery": { "executablePath": "/opt/sensoroni/scripts/stenoquery.sh", "pcapInputPath": "/nsm/pcap", "pcapOutputPath": "/nsm/pcapout" } -{%- elif PCAPENGINE.suri %} +{%- elif GLOBALS.pcap_engine == "SURICATA" %} }, "suriquery": { "executablePath": 
"/opt/sensoroni/scripts/suriquery.sh", "pcapInputPath": "/nsm/suripcap", "pcapOutputPath": "/nsm/pcapout" } -{%- endif %} +{%- endif %} {%- else %} } {%- endif %} diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index 6dce49c8c..cf871906b 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -12,9 +12,9 @@ include: - suricata.config - suricata.sostatus - if blah +{% if GLOBALS.pcap_engine == "SURICATA" %} - suricata.pcap - endif +{% endif %} so-suricata: docker_container.running: diff --git a/salt/suricata/pcap.sls b/salt/suricata/pcap.sls index f677532f0..7a00d8d6a 100644 --- a/salt/suricata/pcap.sls +++ b/salt/suricata/pcap.sls @@ -1,6 +1,5 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} -{% import_yaml 'suricata/defaults.yaml' as SURICATADEFAULTS %} -{% set SURICATAMERGED = salt['pillar.get']('suricata', SURICATADEFAULTS.suricata, merge=True) %} +{% from 'suricata/map.jinja' import SURICATAMERGED %} suripcapdir: file.directory: @@ -10,16 +9,14 @@ suripcapdir: - mode: 755 - makedirs: True -{{ SURICATAMERGED.config['af-packet'].threads }} -for thread in afp.threads +{% for i in range(1, SURICATAMERGED.config['af-packet'].threads) + 1) %} -suripcapthreaddir: +suripcapthread{{i}}dir: file.directory: - - name: /nsm/suripcap/{{thread}} + - name: /nsm/suripcap/{{i}} - user: 940 - group: 939 - mode: 755 - - makedirs: True -endfor \ No newline at end of file +{% endfor %} diff --git a/salt/telegraf/config.sls b/salt/telegraf/config.sls index 0711260b5..a35be55f5 100644 --- a/salt/telegraf/config.sls +++ b/salt/telegraf/config.sls @@ -41,6 +41,8 @@ tgraf_sync_script_{{script}}: - mode: 770 - template: jinja - source: salt://telegraf/scripts/{{script}} + - defaults: + GLOBALS: {{ GLOBALS }} {% endfor %} telegraf_sbin: diff --git a/salt/telegraf/scripts/oldpcap.sh b/salt/telegraf/scripts/oldpcap.sh index d3f4b9a93..b68e71539 100644 --- a/salt/telegraf/scripts/oldpcap.sh +++ b/salt/telegraf/scripts/oldpcap.sh @@ -5,13 +5,12 @@ # 
https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -{%- if pcap is steno -PCAPLOC=/host/nsm/pcap -{%- else %} +{%- if GLOBALS.pcap_engine == "SURICATA" %} PCAPLOC=/host/nsm/suripcap +{%- else %} +PCAPLOC=/host/nsm/pcap {%- endif %} - # if this script isn't already running if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja index 3265cde18..990aeb39b 100644 --- a/salt/vars/globals.map.jinja +++ b/salt/vars/globals.map.jinja @@ -1,5 +1,6 @@ {% import 'vars/init.map.jinja' as INIT %} {% from 'docker/docker.map.jinja' import DOCKER %} +{% from 'global/map.jinja' import GLOBALMERGED %} {% from 'vars/' ~ INIT.GRAINS.role.split('-')[1] ~ '.map.jinja' import ROLE_GLOBALS %} {# role is so-role so we have to split off the 'so' #} @@ -20,6 +21,7 @@ 'influxdb_host': INIT.PILLAR.global.influxdb_host, 'manager_ip': INIT.PILLAR.global.managerip, 'md_engine': INIT.PILLAR.global.mdengine, + 'pcap_engine': GLOBALMERGED.pcapengine 'pipeline': INIT.PILLAR.global.pipeline, 'so_version': INIT.PILLAR.global.soversion, 'so_docker_gateway': DOCKER.gateway, From 37dcb84a09d836ec1b772fe25c42051e7bfdf797 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 10:50:01 -0500 Subject: [PATCH 036/777] add missing comma --- salt/vars/globals.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja index 990aeb39b..624173217 100644 --- a/salt/vars/globals.map.jinja +++ b/salt/vars/globals.map.jinja @@ -21,7 +21,7 @@ 'influxdb_host': INIT.PILLAR.global.influxdb_host, 'manager_ip': INIT.PILLAR.global.managerip, 'md_engine': INIT.PILLAR.global.mdengine, - 'pcap_engine': GLOBALMERGED.pcapengine + 'pcap_engine': GLOBALMERGED.pcapengine, 'pipeline': INIT.PILLAR.global.pipeline, 'so_version': INIT.PILLAR.global.soversion, 'so_docker_gateway': DOCKER.gateway, From 
0522dc180a753ddb5886c9b370df6fc6662eef10 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 13:39:35 -0500 Subject: [PATCH 037/777] map pcap dir to container. enable pcap-log in map --- salt/suricata/enabled.sls | 3 +++ salt/suricata/map.jinja | 5 +++++ salt/suricata/pcap.sls | 4 ++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index cf871906b..8b2776b39 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -35,6 +35,9 @@ so-suricata: - /nsm/suricata/:/nsm/:rw - /nsm/suricata/extracted:/var/log/suricata//filestore:rw - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro + {% if GLOBALS.pcap_engine == "SURICATA" %} + - /nsm/suripcap/:/nsm/pcap:rw + {% endif %} {% if DOCKER.containers['so-suricata'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-suricata'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 01d019de8..5f6e913f5 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -60,6 +60,11 @@ {% do SURICATAMERGED.config.outputs['file-store'].update({'enabled':suricata_mdengine.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %} {% endif %} +{# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #} +{% if GLOBALS.pcap_engine == "SURICATA" %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %} +{% endif %} + {# outputs is a list but we convert to dict in defaults to work with ui #} {# below they are converted back to lists #} {% load_yaml as outputs %} diff --git a/salt/suricata/pcap.sls b/salt/suricata/pcap.sls index 7a00d8d6a..a3cbafa0a 100644 --- a/salt/suricata/pcap.sls +++ b/salt/suricata/pcap.sls @@ -9,8 +9,8 @@ suripcapdir: - mode: 755 - makedirs: True - -{% for i in range(1, SURICATAMERGED.config['af-packet'].threads) + 1) %} +{# there should only be 1 interface in af-packet so we can just 
reference the first list item #} +{% for i in range(1, SURICATAMERGED.config['af-packet'][0].threads + 1) %} suripcapthread{{i}}dir: file.directory: From 8ed66ea468b5eea0d3230db0e23ea34fd3267762 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 15:22:32 -0500 Subject: [PATCH 038/777] disable stenographer if suricata is pcap engine --- salt/pcap/config.map.jinja | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/pcap/config.map.jinja b/salt/pcap/config.map.jinja index 7ed500f25..e6d9f8bda 100644 --- a/salt/pcap/config.map.jinja +++ b/salt/pcap/config.map.jinja @@ -2,6 +2,12 @@ or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} - + +{% from 'vars/globals.map.jinja' import GLOBALS %} {% import_yaml 'pcap/defaults.yaml' as PCAPDEFAULTS %} {% set PCAPMERGED = salt['pillar.get']('pcap', PCAPDEFAULTS.pcap, merge=True) %} + +{# disable stenographer if the pcap engine is set to SURICATA #} +{% if GLOBALS.pcap_engine == "SURICATA" %} +{% do PCAPMERGED.update({'enabled': False}) %} +{% endif %} From f32cb1f1153d691394f58295770717b781929ed9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 15:48:10 -0500 Subject: [PATCH 039/777] fix find to work with steno and suri pcap --- salt/telegraf/scripts/oldpcap.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/telegraf/scripts/oldpcap.sh b/salt/telegraf/scripts/oldpcap.sh index b68e71539..438ce912c 100644 --- a/salt/telegraf/scripts/oldpcap.sh +++ b/salt/telegraf/scripts/oldpcap.sh @@ -15,7 +15,7 @@ PCAPLOC=/host/nsm/pcap if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then # Get the data - OLDPCAP=$(find $PCAPLOC -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'}) + OLDPCAP=$(find $PCAPLOC -type f -exec stat -c'%n %Z' {} + | sort | grep -v "/\." 
| head -n 1 | awk {'print $2'}) DATE=$(date +%s) AGE=$(($DATE - $OLDPCAP)) From b5ffa186fb35f104f9e7d6bca954b5a9af3df442 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Jan 2024 15:54:23 -0500 Subject: [PATCH 040/777] Remove remediate from initial oscap scan Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/stig/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/stig/enabled.sls b/salt/stig/enabled.sls index 5c4b6851b..5f6c72c92 100644 --- a/salt/stig/enabled.sls +++ b/salt/stig/enabled.sls @@ -50,7 +50,7 @@ update_stig_profile: run_initial_scan: module.run: - name: openscap.xccdf - - params: 'eval --remediate --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' + - params: 'eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' {% endif %} run_remediate: From 8b503e2ffa722977841947590195b1aae1a90663 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 15:58:11 -0500 Subject: [PATCH 041/777] telegraf dont run stenoloss script if suricata is pcap engine --- salt/telegraf/map.jinja | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/telegraf/map.jinja b/salt/telegraf/map.jinja index e6d3460d6..b56c8a64d 100644 --- a/salt/telegraf/map.jinja +++ b/salt/telegraf/map.jinja @@ -14,4 +14,11 @@ {% do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('zeekloss.sh') %} {% do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('zeekcaptureloss.sh') %} {% endif %} + +{% from 'pcap/config.map.jinja' import PCAPMERGED %} +{# PCAPMERGED.enabled is set false in soc ui or if suricata is the pcap engine #} +{% if not PCAPMERGED.enabled %} +{% do 
TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('stenoloss.sh') %} +{% endif %} + {% endif %} From 8a25748e3309fea6b8f5f01537e4c8f9bdafc65f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 16:06:24 -0500 Subject: [PATCH 042/777] grammar --- salt/global/soc_global.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index fc1c09b1c..d707fb1cc 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -10,12 +10,12 @@ global: regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. mdengine: - description: What engine to use for meta data generation. Options are ZEEK and SURICATA. + description: Which engine to use for meta data generation. Options are ZEEK and SURICATA. regex: ^(ZEEK|SURICATA)$ regexFailureMessage: You must enter either ZEEK or SURICATA. global: True pcapengine: - description: What engine to use for generating pcap. Options are STENO and SURICATA. + description: Which engine to use for generating pcap. Options are STENO and SURICATA. regex: ^(STENO|SURICATA)$ regexFailureMessage: You must enter either STENO or SURICATA. global: True From 0fa4d92f8ff8beb7d8e597bf357ab3a578c04c51 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 19 Oct 2023 15:49:56 -0600 Subject: [PATCH 043/777] socsigmarepo Need write permissions on the /opt/so/rules dir so I can clone the sigma repo there. 
--- salt/soc/config.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 902d82ec7..95135566b 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -114,6 +114,13 @@ socuploaddir: - group: 939 - makedirs: True +socsigmarepo: + file.directory: + - name: /opt/so/rules + - user: 939 + - group: 939 + - mode: 775 + {% else %} {{sls}}_state_not_allowed: From 4be1214bab11f57286f042ce7dbebd76bcbb8259 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 30 Jan 2024 16:53:57 -0500 Subject: [PATCH 044/777] pcap engine logic for sensoroni --- salt/sensoroni/files/sensoroni.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index f813dad2f..97c91f0b3 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -1,6 +1,7 @@ {%- from 'vars/globals.map.jinja' import GLOBALS %} {%- from 'sensoroni/map.jinja' import SENSORONIMERGED %} {%- from 'pcap/config.map.jinja' import PCAPMERGED %} +{%- from 'suricata/map.jinja' import SURICATAMERGED %} { "logFilename": "/opt/sensoroni/logs/sensoroni.log", "logLevel":"info", @@ -23,22 +24,22 @@ "importer": {}, "statickeyauth": { "apiKey": "{{ GLOBALS.sensoroni_key }}" +{#- if PCAPMERGED.enabled is true then we know that steno is the pcap engine #} +{#- if it is false, then user has steno disabled in ui or has selected suricata for pcap engine #} {%- if PCAPMERGED.enabled %} -{%- if GLOBALS.pcap_engine == "STENO" %} }, "stenoquery": { "executablePath": "/opt/sensoroni/scripts/stenoquery.sh", "pcapInputPath": "/nsm/pcap", "pcapOutputPath": "/nsm/pcapout" } -{%- elif GLOBALS.pcap_engine == "SURICATA" %} +{%- elif GLOBALS.pcap_engine == "SURICATA" and SURICATAMERGED.enabled %} }, "suriquery": { "executablePath": "/opt/sensoroni/scripts/suriquery.sh", "pcapInputPath": "/nsm/suripcap", "pcapOutputPath": "/nsm/pcapout" } -{%- endif %} {%- else %} } {%- 
endif %} From 858166bcae280be03c5b416c912fd8c4a7ac8d61 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Tue, 30 Jan 2024 15:43:51 -0700 Subject: [PATCH 045/777] WIP: Detections Changes Removed some strelka/yara rules from salt. Removed yara scripts for downloading and updating rules. This will be managed by SOC. Added a new compile_yara.py script. Added the strelka repos folder. --- salt/manager/init.sls | 53 +------------------ .../manager/tools/sbin_jinja/so-yara-download | 51 ------------------ salt/manager/tools/sbin_jinja/so-yara-update | 41 -------------- salt/soc/files/bin/compile_yara.py | 14 +++++ salt/strelka/backend/config.sls | 10 ---- salt/strelka/config.sls | 9 +++- 6 files changed, 24 insertions(+), 154 deletions(-) delete mode 100644 salt/manager/tools/sbin_jinja/so-yara-download delete mode 100755 salt/manager/tools/sbin_jinja/so-yara-update create mode 100644 salt/soc/files/bin/compile_yara.py diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 23ef189b5..51590a6ec 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -1,5 +1,5 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
@@ -61,7 +61,7 @@ manager_sbin: - user: 939 - group: 939 - file_mode: 755 - - exclude_pat: + - exclude_pat: - "*_test.py" yara_update_scripts: @@ -103,55 +103,6 @@ rules_dir: - group: socore - makedirs: True -{% if STRELKAMERGED.rules.enabled %} - -strelkarepos: - file.managed: - - name: /opt/so/conf/strelka/repos.txt - - source: salt://strelka/rules/repos.txt.jinja - - template: jinja - - defaults: - STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} - - makedirs: True - -strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' - - identifier: strelka-yara-update - - hour: '7' - - minute: '1' - -strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' - - identifier: strelka-yara-download - - hour: '7' - - minute: '1' - -{% if not GLOBALS.airgap %} -update_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-update - - onchanges: - - file: yara_update_scripts - -download_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-download - - onchanges: - - file: yara_update_scripts -{% endif %} -{% endif %} {% else %} {{sls}}_state_not_allowed: diff --git a/salt/manager/tools/sbin_jinja/so-yara-download b/salt/manager/tools/sbin_jinja/so-yara-download deleted file mode 100644 index aa9576253..000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-download +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -NOROOT=1 -. 
/usr/sbin/so-common - -{%- set proxy = salt['pillar.get']('manager:proxy') %} -{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %} - -# Download the rules from the internet -{%- if proxy %} -export http_proxy={{ proxy }} -export https_proxy={{ proxy }} -export no_proxy="{{ noproxy }}" -{%- endif %} - -repos="/opt/so/conf/strelka/repos.txt" -output_dir=/nsm/rules/yara -gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com) -clone_dir="/tmp" -if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then - - while IFS= read -r repo; do - if ! $(echo "$repo" | grep -qE '^#'); then - # Remove old repo if existing bc of previous error condition or unexpected disruption - repo_name=`echo $repo | awk -F '/' '{print $NF}'` - [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name - - # Clone repo and make appropriate directories for rules - git clone $repo $clone_dir/$repo_name - echo "Analyzing rules from $clone_dir/$repo_name..." - mkdir -p $output_dir/$repo_name - # Ensure a copy of the license is available for the rules - [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name - - # Copy over rules - for i in $(find $clone_dir/$repo_name -name "*.yar*"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - cp $i $output_dir/$repo_name - done - rm -rf $clone_dir/$repo_name - fi - done < $repos - - echo "Done!" - -/usr/sbin/so-yara-update - -else - echo "Server returned $gh_status status code." - echo "No connectivity to Github...exiting..." - exit 1 -fi diff --git a/salt/manager/tools/sbin_jinja/so-yara-update b/salt/manager/tools/sbin_jinja/so-yara-update deleted file mode 100755 index 07c940f47..000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-update +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -NOROOT=1 -. /usr/sbin/so-common - -echo "Starting to check for yara rule updates at $(date)..." - -newcounter=0 -excludedcounter=0 -excluded_rules=({{ EXCLUDEDRULES | join(' ') }}) - -# Pull down the SO Rules -SORULEDIR=/nsm/rules/yara -OUTPUTDIR=/opt/so/saltstack/local/salt/strelka/rules - -mkdir -p $OUTPUTDIR -# remove all rules prior to copy so we can clear out old rules -rm -f $OUTPUTDIR/* - -for i in $(find $SORULEDIR -name "*.yar" -o -name "*.yara"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - if [[ ! "${excluded_rules[*]}" =~ ${rule_name} ]]; then - echo "Adding rule: $rule_name..." - cp $i $OUTPUTDIR/$rule_name - ((newcounter++)) - else - echo "Excluding rule: $rule_name..." - ((excludedcounter++)) - fi -done - -if [ "$newcounter" -gt 0 ] || [ "$excludedcounter" -gt 0 ];then - echo "$newcounter rules added." - echo "$excludedcounter rule(s) excluded." -fi - -echo "Finished rule updates at $(date)..." 
diff --git a/salt/soc/files/bin/compile_yara.py b/salt/soc/files/bin/compile_yara.py new file mode 100644 index 000000000..43c8b1a09 --- /dev/null +++ b/salt/soc/files/bin/compile_yara.py @@ -0,0 +1,14 @@ +import os +import yara +import glob +import sys + +def compile_yara_rules(rules_dir: str) -> None: + compiled_rules_path: str = os.path.join(rules_dir, "rules.yar.compiled") + rule_files: list[str] = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) + + if rule_files: + rules: yara.Rules = yara.compile(filepaths={os.path.basename(f): f for f in rule_files}) + rules.save(compiled_rules_path) + +compile_yara_rules(sys.argv[1]) diff --git a/salt/strelka/backend/config.sls b/salt/strelka/backend/config.sls index d51debb1b..b39e06ac8 100644 --- a/salt/strelka/backend/config.sls +++ b/salt/strelka/backend/config.sls @@ -50,16 +50,6 @@ backend_taste: - user: 939 - group: 939 -{% if STRELKAMERGED.rules.enabled %} -strelkarules: - file.recurse: - - name: /opt/so/conf/strelka/rules - - source: salt://strelka/rules - - user: 939 - - group: 939 - - clean: True -{% endif %} - {% else %} {{sls}}_state_not_allowed: diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 1d0f75adf..929bef113 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -1,5 +1,5 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
@@ -21,6 +21,13 @@ strelkarulesdir: - group: 939 - makedirs: True +strelkareposdir: + file.directory: + - name: /opt/so/conf/strelka/repos + - user: 939 + - group: 939 + - makedirs: True + strelkadatadir: file.directory: - name: /nsm/strelka From 00289c201ee39e5bba2836f30106dbe7957acf61 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 31 Jan 2024 08:58:57 -0500 Subject: [PATCH 046/777] fix pcap paths --- salt/sensoroni/enabled.sls | 3 +++ salt/suricata/defaults.yaml | 2 +- salt/suricata/enabled.sls | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/sensoroni/enabled.sls b/salt/sensoroni/enabled.sls index 2111e8f1b..07b4df10a 100644 --- a/salt/sensoroni/enabled.sls +++ b/salt/sensoroni/enabled.sls @@ -23,6 +23,9 @@ so-sensoroni: - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro - /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw + {% if GLOBALS.pcap_engine == "SURICATA" %} + - /nsm/suripcap/:/nsm/suripcap:rw + {% endif %} {% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-sensoroni'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 4961ae50a..eb2c181e3 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -137,7 +137,7 @@ suricata: max-files: 10 use-stream-depth: "no" conditional: "all" - dir: "/nsm/pcap" + dir: "/nsm/suripcap" alert-debug: enabled: "no" alert-prelude: diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index 8b2776b39..fa1ebafef 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -36,7 +36,7 @@ so-suricata: - /nsm/suricata/extracted:/var/log/suricata//filestore:rw - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro {% if GLOBALS.pcap_engine == "SURICATA" %} - - /nsm/suripcap/:/nsm/pcap:rw + - /nsm/suripcap/:/nsm/suripcap:rw {% endif %} {% if 
DOCKER.containers['so-suricata'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-suricata'].custom_bind_mounts %} From 1192dbd5305c190460effdc28ccbd85d79427cf7 Mon Sep 17 00:00:00 2001 From: Pete Date: Wed, 31 Jan 2024 09:01:56 -0500 Subject: [PATCH 047/777] also remove intca symlink The symlink is created in init.sls; it should be removed here. --- salt/ssl/remove.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/ssl/remove.sls b/salt/ssl/remove.sls index 43a245288..7738490e5 100644 --- a/salt/ssl/remove.sls +++ b/salt/ssl/remove.sls @@ -2,6 +2,10 @@ trusttheca: file.absent: - name: /etc/pki/tls/certs/intca.crt +symlinkca: + file.absent: + - name: /etc/ssl/certs/intca.crt + influxdb_key: file.absent: - name: /etc/pki/influxdb.key From 0d01d09d2e8805287cfd061038b1df64ff1348c5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 31 Jan 2024 09:15:35 -0500 Subject: [PATCH 048/777] fix pcap paths --- salt/sensoroni/enabled.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/sensoroni/enabled.sls b/salt/sensoroni/enabled.sls index 07b4df10a..6dc3df2bd 100644 --- a/salt/sensoroni/enabled.sls +++ b/salt/sensoroni/enabled.sls @@ -25,6 +25,7 @@ so-sensoroni: - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw {% if GLOBALS.pcap_engine == "SURICATA" %} - /nsm/suripcap/:/nsm/suripcap:rw + - /nsm/suripcaptmp:/nsm/suripcaptmp:rw {% endif %} {% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-sensoroni'].custom_bind_mounts %} From 585147d1de66d700849a23057f7af85c97421433 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Wed, 31 Jan 2024 10:39:47 -0700 Subject: [PATCH 049/777] Added so-detection mapping in elasticsearch --- salt/elasticsearch/defaults.yaml | 31 ++++- .../component/so/detection-mappings.json | 108 ++++++++++++++++++ .../component/so/detection-settings.json | 7 ++ 3 files changed, 145 insertions(+), 1 deletion(-) create mode 100644 
salt/elasticsearch/templates/component/so/detection-mappings.json create mode 100644 salt/elasticsearch/templates/component/so/detection-settings.json diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index ce1bfb08d..480467129 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -117,6 +117,35 @@ elasticsearch: sort: field: '@timestamp' order: desc + so-detection: + index_sorting: false + index_template: + composed_of: + - detection-mappings + - detection-settings + index_patterns: + - so-detection* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + mapping: + total_fields: + limit: 1500 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc so-common: close: 30 delete: 365 @@ -8909,7 +8938,7 @@ elasticsearch: actions: set_priority: priority: 50 - min_age: 30d + min_age: 30d so-logs-ti_otx_x_threat: index_sorting: false index_template: diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json new file mode 100644 index 000000000..df53308f2 --- /dev/null +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -0,0 +1,108 @@ +{ + "template": { + "mappings": { + "properties": { + "so_audit_doc_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "@timestamp": { + "type": "date" + }, + "so_kind": { + "ignore_above": 1024, + "type": "keyword" + }, + "so_operation": { + "ignore_above": 1024, + "type": "keyword" + }, + "so_detection": { + "properties": { + "publicId": { + "type": "text" + }, + "title": { + "type": "text" + }, + "severity": { + "ignore_above": 1024, + "type": "keyword" + }, + "author": { + "type": "text" + }, + "description": { + "type": "text" + }, + "content": { + 
"type": "text" + }, + "isEnabled": { + "type": "boolean" + }, + "isReporting": { + "type": "boolean" + }, + "isCommunity": { + "type": "boolean" + }, + "note": { + "type": "text" + }, + "engine": { + "ignore_above": 1024, + "type": "keyword" + }, + "overrides": { + "properties": { + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "isEnabled": { + "type": "boolean" + }, + "createdAt": { + "type": "date" + }, + "updatedAt": { + "type": "date" + }, + "regex": { + "type": "text" + }, + "value": { + "type": "text" + }, + "thresholdType": { + "ignore_above": 1024, + "type": "keyword" + }, + "track": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip": { + "type": "text" + }, + "count": { + "type": "long" + }, + "seconds": { + "type": "long" + }, + "customFilter": { + "type": "text" + } + } + } + } + } + } + } + }, + "_meta": { + "ecs_version": "1.12.2" + } +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/component/so/detection-settings.json b/salt/elasticsearch/templates/component/so/detection-settings.json new file mode 100644 index 000000000..7b0947a4c --- /dev/null +++ b/salt/elasticsearch/templates/component/so/detection-settings.json @@ -0,0 +1,7 @@ +{ + "template": {}, + "version": 1, + "_meta": { + "description": "default settings for common Security Onion Detections indices" + } +} \ No newline at end of file From cd4bd6460aa117ebc798c6f74f5ddfa32b6fc65f Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 31 Jan 2024 20:16:18 +0000 Subject: [PATCH 050/777] Custom pipelines --- salt/elasticsearch/defaults.yaml | 81 ++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index ce1bfb08d..541f3fae7 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -55,6 +55,87 @@ elasticsearch: key: /usr/share/elasticsearch/config/elasticsearch.key verification_mode: none enabled: false + pipelines: + custom01: + 
description: Custom Pipeline + processors: + - set: + field: tags + value: custom01 + - pipeline: + name: common + custom02: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom02 + - pipeline: + name: common + custom03: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom03 + - pipeline: + name: common + custom04: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom04 + - pipeline: + name: common + custom05: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom05 + - pipeline: + name: common + custom06: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom06 + - pipeline: + name: common + custom07: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom07 + - pipeline: + name: common + custom08: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom08 + - pipeline: + name: common + custom09: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom09 + - pipeline: + name: common + custom10: + description: Custom Pipeline + processors: + - set: + field: tags + value: custom10 + - pipeline: + name: common index_settings: global_overrides: index_template: From bc75be940243793ee0b33c031efbcc00c2c988e3 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 31 Jan 2024 20:16:48 +0000 Subject: [PATCH 051/777] Custom pipelines in UI --- salt/elasticsearch/config.sls | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/elasticsearch/config.sls b/salt/elasticsearch/config.sls index 23e11a710..27a8a0fd6 100644 --- a/salt/elasticsearch/config.sls +++ b/salt/elasticsearch/config.sls @@ -118,6 +118,19 @@ esingestconf: - user: 930 - group: 939 +# Auto-generate Elasticsearch ingest node pipelines from pillar +{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %} +es_ingest_conf_{{pipeline}}: + file.managed: + - name: 
/opt/so/conf/elasticsearch/ingest/{{ pipeline }} + - source: salt://elasticsearch/base-template.json.jinja + - defaults: + TEMPLATE_CONFIG: {{ config }} + - template: jinja + - onchanges_in: + - file: so-pipelines-reload +{% endfor %} + eslog4jfile: file.managed: - name: /opt/so/conf/elasticsearch/log4j2.properties From 1853dc398bc377dc367cd0aaeeaefece7acec3c5 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 31 Jan 2024 20:17:33 +0000 Subject: [PATCH 052/777] Custom pipeline configuration --- salt/logstash/defaults.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index e4c18cc64..2cafce6fd 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -42,6 +42,24 @@ logstash: custom2: [] custom3: [] custom4: [] + pipeline_config: + custom01: |- + filter { + if [event][module] =~ "zeek" { + mutate { + add_tag => ["network_stuff"] + } + } + } + custom02: PLACEHOLDER + custom03: PLACEHOLDER + custom04: PLACEHOLDER + custom05: PLACEHOLDER + custom06: PLACEHOLDER + custom07: PLACEHOLDER + custom08: PLACEHOLDER + custom09: PLACEHOLDER + custom10: PLACEHOLDER settings: lsheap: 500m config: From 4672a5b8ebe83e4c5abac79abf5e87ab3c1f3d39 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 31 Jan 2024 20:18:17 +0000 Subject: [PATCH 053/777] Custom pipeline configuration in UI --- salt/logstash/config.sls | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/salt/logstash/config.sls b/salt/logstash/config.sls index 6ff33ff1a..8a59c83b7 100644 --- a/salt/logstash/config.sls +++ b/salt/logstash/config.sls @@ -63,6 +63,20 @@ lspipelinedir: - user: 931 - group: 939 +# Auto-generate Logstash pipeline config +{% for pipeline, config in LOGSTASH_MERGED.pipeline_config.items() %} +{% for assigned_pipeline in ASSIGNED_PIPELINES %} +{% set custom_pipeline = 'custom/' + pipeline + '.conf' %} +{% if custom_pipeline in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %} 
+ls_custom_pipeline_conf_{{assigned_pipeline}}_{{pipeline}}: + file.managed: + - name: /opt/so/conf/logstash/pipelines/{{assigned_pipeline}}/{{ pipeline }}.conf + - contents: LOGSTASH_MERGED.pipeline_config.{{pipeline}} +{% endif %} +{% endfor %} +{% endfor %} + + {% for assigned_pipeline in ASSIGNED_PIPELINES %} {% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %} ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}: From 0fe96bfc2d66223f2d3ffa1ad4ee30ed5b1144fc Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 31 Jan 2024 16:17:40 -0500 Subject: [PATCH 054/777] switch to symlink --- salt/elasticfleet/enabled.sls | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index f5f53c2a5..a84d51cfc 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -44,6 +44,13 @@ so-elastic-fleet-auto-configure-artifact-urls: cmd.run: - name: /usr/sbin/so-elastic-fleet-artifacts-url-update - retry: True + +elasticagentartifactssymlink: + file.symlink: + - name: /opt/so/saltstack/local/salt/beats + - target: /nsm/elastic-fleet/artifacts/beats + - user: socore + - group: socore {% endif %} # Sync Elastic Agent artifacts to Fleet Node @@ -51,8 +58,7 @@ so-elastic-fleet-auto-configure-artifact-urls: elasticagent_syncartifacts: file.recurse: - name: /nsm/elastic-fleet/artifacts/beats - - source: salt://beats?saltenv=elasticartifacts - + - source: salt://beats {% endif %} {% if SERVICETOKEN != '' %} From 341ff5b56436289ca9614c08dea10134d222f239 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 31 Jan 2024 16:18:51 -0500 Subject: [PATCH 055/777] Update so-functions --- setup/so-functions | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index d19f27620..46bb0b4b2 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1933,7 +1933,11 @@ saltify() { 
logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION" else # We just need the minion - logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION" + if [[ $is_airgap ]]; then + logCmd "dnf -y install salt salt-minion" + else + logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION" + fi fi fi From 2f03248612bc7e881733d1eec9201d8e70f3fd7c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 31 Jan 2024 16:22:44 -0500 Subject: [PATCH 056/777] use different nginx defaults for so-fleet node hosting artifacts --- salt/docker/defaults.yaml | 7 +++++++ salt/firewall/containers.map.jinja | 2 +- salt/nginx/enabled.sls | 23 +++++++++++++++-------- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 9a27843ae..4bc212fbe 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -84,6 +84,13 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] + 'so-nginx-fleet-node': + final_octet: 31 + port_bindings: + - 8443:8443 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] 'so-playbook': final_octet: 32 port_bindings: diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 99a3bd5d0..b3ead0f4c 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -95,7 +95,7 @@ {% set NODE_CONTAINERS = [ 'so-elastic-fleet', 'so-logstash', - 'so-nginx' + 'so-nginx-fleet-node' ] %} {% elif GLOBALS.role == 'so-sensor' %} diff --git a/salt/nginx/enabled.sls b/salt/nginx/enabled.sls index eca9c237a..273fb65be 100644 --- a/salt/nginx/enabled.sls +++ b/salt/nginx/enabled.sls @@ -94,17 +94,24 @@ make-rule-dir-nginx: {% endif %} +{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #} +{% if GLOBALS.role == 'so-fleet' %} +{% set container_config = 'so-nginx-fleet-node' %} +{% else %} +{% set container_config = 'so-nginx' %} 
+{% endif %} + so-nginx: docker_container.running: - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }} - hostname: so-nginx - networks: - sobridge: - - ipv4_address: {{ DOCKER.containers['so-nginx'].ip }} + - ipv4_address: {{ DOCKER.containers[container_config].ip }} - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-nginx'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-nginx'].extra_hosts %} + {% if DOCKER.containers[container_config].extra_hosts %} + {% for XTRAHOST in DOCKER.containers[container_config].extra_hosts %} - {{ XTRAHOST }} {% endfor %} {% endif %} @@ -124,20 +131,20 @@ so-nginx: - /nsm/repo:/opt/socore/html/repo:ro - /nsm/rules:/nsm/rules:ro {% endif %} - {% if DOCKER.containers['so-nginx'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-nginx'].custom_bind_mounts %} + {% if DOCKER.containers[container_config].custom_bind_mounts %} + {% for BIND in DOCKER.containers[container_config].custom_bind_mounts %} - {{ BIND }} {% endfor %} {% endif %} - {% if DOCKER.containers['so-nginx'].extra_env %} + {% if DOCKER.containers[container_config].extra_env %} - environment: - {% for XTRAENV in DOCKER.containers['so-nginx'].extra_env %} + {% for XTRAENV in DOCKER.containers[container_config].extra_env %} - {{ XTRAENV }} {% endfor %} {% endif %} - cap_add: NET_BIND_SERVICE - port_bindings: - {% for BINDING in DOCKER.containers['so-nginx'].port_bindings %} + {% for BINDING in DOCKER.containers[container_config].port_bindings %} - {{ BINDING }} {% endfor %} - watch: From ae32ac40c2dde62a0c26319c78a667700406f94e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 31 Jan 2024 16:28:45 -0500 Subject: [PATCH 057/777] add fleet node nginx to docker annotations --- salt/docker/soc_docker.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index 850324a9e..6e0efeb20 100644 --- 
a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -48,6 +48,7 @@ docker: so-logstash: *dockerOptions so-mysql: *dockerOptions so-nginx: *dockerOptions + so-nginx-fleet-node: *dockerOptions so-playbook: *dockerOptions so-redis: *dockerOptions so-sensoroni: *dockerOptions From bc502cc065c934d1afad9c08c4001a5bcb9dd732 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 31 Jan 2024 21:46:33 +0000 Subject: [PATCH 058/777] Custom Elasticsearch pipeline annotations --- salt/elasticsearch/soc_elasticsearch.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index c54e07660..0f410e716 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -45,6 +45,28 @@ elasticsearch: description: Max number of boolean clauses per query. global: True helpLink: elasticsearch.html + pipelines: + custom01: &pipelines + description: + description: Description of the ingest node pipeline + global: True + advanced: True + helpLink: elasticsearch.html + processors: + description: Processors for the ingest node pipeline + global: True + advanced: True + multiline: True + helpLink: elasticsearch.html + custom02: *pipelines + custom03: *pipelines + custom04: *pipelines + custom05: *pipelines + custom06: *pipelines + custom07: *pipelines + custom08: *pipelines + custom09: *pipelines + custom10: *pipelines index_settings: global_overrides: index_template: From 136097f9816b30b85804a2b43ddf208205c28581 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 31 Jan 2024 21:47:09 +0000 Subject: [PATCH 059/777] Custom Logstash pipeline annotations --- salt/logstash/soc_logstash.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index bcb99bad5..dc52d63a8 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -31,6 +31,22 @@
logstash: custom2: *defined_pipelines custom3: *defined_pipelines custom4: *defined_pipelines + pipeline_config: + custom01: &pipeline_config + description: Pipeline configuration for Logstash + advanced: True + multiline: True + forcedType: string + helpLink: logstash.html + custom02: *pipeline_config + custom03: *pipeline_config + custom04: *pipeline_config + custom05: *pipeline_config + custom06: *pipeline_config + custom07: *pipeline_config + custom08: *pipeline_config + custom09: *pipeline_config + custom10: *pipeline_config settings: lsheap: description: Heap size to use for logstash From 881d6b313e394a56fc7e64c3de3302a6a1719b3e Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 31 Jan 2024 17:04:11 -0500 Subject: [PATCH 060/777] Update VERSION - kilo --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 9cf89c6c7..7f2e97617 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.50 +2.4.0-kilo From 49b5788ac14d4f79f6cac6b300e5f5cc6357172f Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 1 Feb 2024 07:21:49 -0500 Subject: [PATCH 061/777] add bindings --- salt/soc/enabled.sls | 1 + salt/strelka/backend/enabled.sls | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 2661587f4..0eae0e80e 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -22,6 +22,7 @@ so-soc: - sobridge: - ipv4_address: {{ DOCKER.containers['so-soc'].ip }} - binds: + - /nsm/rules:/nsm/rules:rw #Need to tighten this up? 
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/uploads:/nsm/soc/uploads:rw - /opt/so/log/soc/:/opt/sensoroni/logs/:rw diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index fc56f4197..9ebb1a148 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - - watch: - - file: strelkarules + #- watch: + # - file: strelkarules delete_so-strelka-backend_so-status.disabled: file.uncomment: From 182667bafb3769b636f715385d24805dda40c2bb Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 1 Feb 2024 13:59:23 +0000 Subject: [PATCH 062/777] Change numbers for Elasticsearch --- salt/elasticsearch/defaults.yaml | 40 +++++++++++------------ salt/elasticsearch/soc_elasticsearch.yaml | 20 ++++++------ 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 541f3fae7..03cd6d519 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -56,84 +56,84 @@ elasticsearch: verification_mode: none enabled: false pipelines: - custom01: + custom001: description: Custom Pipeline processors: - set: field: tags - value: custom01 + value: custom001 - pipeline: name: common - custom02: + custom002: description: Custom Pipeline processors: - set: field: tags - value: custom02 + value: custom002 - pipeline: name: common - custom03: + custom003: description: Custom Pipeline processors: - set: field: tags - value: custom03 + value: custom003 - pipeline: name: common - custom04: + custom004: description: Custom Pipeline processors: - set: field: tags - value: custom04 + value: custom004 - pipeline: name: common - custom05: + custom005: description: Custom Pipeline processors: - set: field: tags - value: custom05 + value: custom005 - pipeline: name: common - custom06: + custom006: description: Custom Pipeline processors: - set: field: tags - value: 
custom06 + value: custom006 - pipeline: name: common - custom07: + custom007: description: Custom Pipeline processors: - set: field: tags - value: custom07 + value: custom007 - pipeline: name: common - custom08: + custom008: description: Custom Pipeline processors: - set: field: tags - value: custom08 + value: custom008 - pipeline: name: common - custom09: + custom009: description: Custom Pipeline processors: - set: field: tags - value: custom09 + value: custom009 - pipeline: name: common - custom10: + custom010: description: Custom Pipeline processors: - set: field: tags - value: custom10 + value: custom010 - pipeline: name: common index_settings: diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 0f410e716..9a64190b3 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -46,7 +46,7 @@ elasticsearch: global: True helpLink: elasticsearch.html pipelines: - custom01: &pipelines + custom001: &pipelines description: description: Description of the ingest node pipeline global: True @@ -58,15 +58,15 @@ elasticsearch: advanced: True multiline: True helpLink: elasticsearch.html - custom02: *pipelines - custom03: *pipelines - custom04: *pipelines - custom05: *pipelines - custom06: *pipelines - custom07: *pipelines - custom08: *pipelines - custom09: *pipelines - custom10: *pipelines + custom002: *pipelines + custom003: *pipelines + custom004: *pipelines + custom005: *pipelines + custom006: *pipelines + custom007: *pipelines + custom008: *pipelines + custom009: *pipelines + custom010: *pipelines index_settings: global_overrides: index_template: From 1818e134cafe3c29224d66cd623f839d2065e8cd Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 1 Feb 2024 14:01:55 +0000 Subject: [PATCH 063/777] Change numbers for Logstash --- salt/logstash/defaults.yaml | 20 ++++++++++---------- salt/logstash/soc_logstash.yaml | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 
deletions(-) diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index 2cafce6fd..348acb622 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -43,7 +43,7 @@ logstash: custom3: [] custom4: [] pipeline_config: - custom01: |- + custom001: |- filter { if [event][module] =~ "zeek" { mutate { @@ -51,15 +51,15 @@ logstash: } } } - custom02: PLACEHOLDER - custom03: PLACEHOLDER - custom04: PLACEHOLDER - custom05: PLACEHOLDER - custom06: PLACEHOLDER - custom07: PLACEHOLDER - custom08: PLACEHOLDER - custom09: PLACEHOLDER - custom10: PLACEHOLDER + custom002: PLACEHOLDER + custom003: PLACEHOLDER + custom004: PLACEHOLDER + custom005: PLACEHOLDER + custom006: PLACEHOLDER + custom007: PLACEHOLDER + custom008: PLACEHOLDER + custom009: PLACEHOLDER + custom010: PLACEHOLDER settings: lsheap: 500m config: diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index dc52d63a8..3172ff7c5 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -32,21 +32,21 @@ logstash: custom3: *defined_pipelines custom4: *defined_pipelines pipeline_config: - custom01: &pipeline_config + custom001: &pipeline_config description: Pipeline configuration for Logstash advanced: True multiline: True forcedType: string helpLink: logstash.html - custom02: *pipeline_config - custom03: *pipeline_config - custom04: *pipeline_config - custom05: *pipeline_config - custom06: *pipeline_config - custom07: *pipeline_config - custom08: *pipeline_config - custom09: *pipeline_config - custom10: *pipeline_config + custom002: *pipeline_config + custom003: *pipeline_config + custom004: *pipeline_config + custom005: *pipeline_config + custom006: *pipeline_config + custom007: *pipeline_config + custom008: *pipeline_config + custom009: *pipeline_config + custom010: *pipeline_config settings: lsheap: description: Heap size to use for logstash From e090518b5920ef593e9bbb336a938edb6afe3a0e Mon Sep 17 00:00:00 2001 From: Josh Brower 
Date: Thu, 1 Feb 2024 09:46:53 -0500 Subject: [PATCH 064/777] Refactor script --- .../so-elastic-fleet-artifacts-url-update | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update index bcd3ef7f7..ffbeaf6e1 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update @@ -26,7 +26,7 @@ array_contains () { } # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) -LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') +LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}' # Initialize an array for new hosts from Fleet Nodes declare -a NEW_LIST=() @@ -40,34 +40,46 @@ if grep -q "fleet" <<< "$LOGSTASHNODES"; then fi # Create an array for expected hosts and their names -declare -A expected_hosts=( +declare -A expected_urls=( ["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}" ["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts" ) -# Merge NEW_LIST into expected_hosts +# Merge NEW_LIST into expected_urls for host in "${NEW_LIST[@]}"; do - expected_hosts[$host]="FleetServer" + expected_urls[$host]="FleetServer" done # Fetch the current hosts from the API -current_hosts=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host) +current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host) # Convert current hosts to an array -IFS=$'\n' read -rd '' -a current_hosts_array <<<"$current_hosts" +IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls" + +# Flag to track if any host was added +any_url_added=0 # Check each 
expected host -for host in "${!expected_hosts[@]}"; do - array_contains current_hosts_array "$host" || { - echo "$host (${expected_hosts[$host]}) is missing. Adding it..." +for host in "${!expected_urls[@]}"; do + array_contains current_urls_array "$host" || { + echo "$host (${expected_urls[$host]}) is missing. Adding it..." # Prepare the JSON payload JSON_STRING=$( jq -n \ - --arg NAME "${expected_hosts[$host]}" \ + --arg NAME "${expected_urls[$host]}" \ --arg URL "$host" \ '{"name":$NAME,"host":$URL}' ) # Create the missing host curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" + + # Flag that an artifact URL was added + any_url_added=1 } + done + + +if [[ $any_url_added -eq 0 ]]; then + echo "All expected artifact URLs are present. No updates needed." +fi From 0d5db58c86f39521ec954d186db6050b7d41fbad Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 1 Feb 2024 10:32:41 -0500 Subject: [PATCH 065/777] upgrade salt3006.6 --- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 1b4d2e63a..19677f70b 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -1,4 +1,4 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3006.5 + version: 3006.6 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index c15929951..2e4ebc93e 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -1,6 +1,6 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3006.5 + version: 3006.6 check_threshold: 3600 # in seconds, threshold used for 
so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. From fe196b56619ee240a81508a111e2e13427e3f382 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 1 Feb 2024 12:22:50 -0500 Subject: [PATCH 066/777] Add SOC Config for Detections --- salt/soc/defaults.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index c1b9470c8..fdbdfd6b2 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1002,6 +1002,11 @@ soc: jobDir: jobs kratos: hostUrl: + elastalertengine: + communityRulesImportFrequencySeconds: 180 + elastAlertRulesFolder: /opt/so/rules/elastalert + rulesFingerprintFile: /opt/so/conf/soc/sigma.fingerprint + sigmaRulePackages: all elastic: hostUrl: remoteHostUrls: [] @@ -1043,6 +1048,15 @@ soc: - rbac/custom_roles userFiles: - rbac/users_roles + strelkaengine: + compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py + reposFolder: /nsm/rules/strelka/repos + rulesRepos: + - https://github.com/Security-Onion-Solutions/securityonion-yara + yaraRulesFolder: /opt/so/conf/strelka/rules + suricataengine: + communityRulesFile: /nsm/rules/suricata/emerging-all.rules + rulesFingerprintFile: /opt/so/conf/soc/emerging-all.fingerprint client: enableReverseLookup: false docsUrl: /docs/ From 7a29b3a529cd3ddb60e2b2a144d6b8861cac524e Mon Sep 17 00:00:00 2001 From: Pete Date: Fri, 2 Feb 2024 08:45:01 -0500 Subject: [PATCH 067/777] call salt before stopping salt services salt-call does not work when the salt-master is not running. If these calls are to succeed, they should occur before the salt services are stopped. 
--- setup/so-functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index d19f27620..717ec0ce4 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1600,6 +1600,9 @@ reinstall_init() { salt-call -l info saltutil.kill_all_jobs --local fi + logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt" + logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt" + # Kill any salt processes (safely) for service in "${salt_services[@]}"; do # Stop the service in the background so we can exit after a certain amount of time @@ -1621,9 +1624,6 @@ reinstall_init() { done done - logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt" - logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt" - # Remove all salt configs rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/ From cf83d1cb869a5c2a53d3a948e9193c152261fec6 Mon Sep 17 00:00:00 2001 From: Pete Date: Fri, 2 Feb 2024 12:25:16 -0500 Subject: [PATCH 068/777] feat: use mountpoint for Elastic log limit Instead of just existence, this checks if the directories are separate mountpoints when determining disk size and log_size_limit calculations. It also sets the percentage to 80 if /nsm/elasticsearch is a separate mountpoint. This allows for better disk utilization on server configurations where /nsm is based on large slow HDDs for increased PCAP retention but /nsm/elasticsearch is based on SSDs for faster Elasticsearch performance. 
--- setup/so-functions | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index d19f27620..3caf76c06 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2148,11 +2148,12 @@ set_default_log_size() { esac local disk_dir="/" - if [ -d /nsm ]; then + if mountpoint -q /nsm; then disk_dir="/nsm" fi - if [ -d /nsm/elasticsearch ]; then + if mountpoint -q /nsm/elasticsearch; then disk_dir="/nsm/elasticsearch" + percentage=80 fi local disk_size_1k From 8f81c9eb68b4c64dace40e065d91cf75b10019f0 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 2 Feb 2024 11:49:58 -0700 Subject: [PATCH 069/777] Updating config for Detection(s) --- salt/soc/defaults.yaml | 52 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index fdbdfd6b2..7f6686431 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -64,7 +64,7 @@ soc: icon: fa-external-link-alt target: _blank links: - - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' + - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' eventFields: default: - soc_timestamp @@ -1756,3 +1756,53 @@ soc: - amber+strict - red customEnabled: false + detections: + viewEnabled: true + createLink: /detection/create + eventFetchLimit: 500 + eventItemsPerPage: 50 + groupFetchLimit: 50 + mostRecentlyUsedLimit: 5 + safeStringMaxLength: 100 + queryBaseFilter: '_index:"*:so-detection" AND so_kind:detection' + eventFields: + default: + - so_detection.title + - so_detection.isEnabled + - so_detection.engine + - "@timestamp" + queries: + - name: "All Detections" + query: "_id:*" + - name: "Local Rules" + query: "so_detection.isCommunity:false" + - name: "Enabled" + query: "so_detection.isEnabled:true" + - name: "Disabled" + query: "so_detection.isEnabled:false" + - name: "Suricata" + query: "so_detection.engine:suricata" + - name: "ElastAlert" + query: 
"so_detection.engine:elastalert" + - name: "Strelka" + query: "so_detection.engine:strelka" + detection: + presets: + severity: + customEnabled: false + labels: + - unknown + - informational + - low + - medium + - high + - critical + engine: + customEnabled: false + labels: + - suricata + - elastalert + - strelka + severityTranslations: + minor: low + major: high From 378c99ae8844d78bf6691aa2179cc898e2de17d4 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 2 Feb 2024 18:25:54 -0500 Subject: [PATCH 070/777] Fix bindings --- salt/soc/defaults.yaml | 8 ++++---- salt/soc/enabled.sls | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 7f6686431..6811529bf 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1004,8 +1004,8 @@ soc: hostUrl: elastalertengine: communityRulesImportFrequencySeconds: 180 - elastAlertRulesFolder: /opt/so/rules/elastalert - rulesFingerprintFile: /opt/so/conf/soc/sigma.fingerprint + elastAlertRulesFolder: /opt/sensoroni/elastalert + rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint sigmaRulePackages: all elastic: hostUrl: @@ -1053,10 +1053,10 @@ soc: reposFolder: /nsm/rules/strelka/repos rulesRepos: - https://github.com/Security-Onion-Solutions/securityonion-yara - yaraRulesFolder: /opt/so/conf/strelka/rules + yaraRulesFolder: /opt/sensoroni/yara suricataengine: communityRulesFile: /nsm/rules/suricata/emerging-all.rules - rulesFingerprintFile: /opt/so/conf/soc/emerging-all.fingerprint + rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint client: enableReverseLookup: false docsUrl: /docs/ diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 0eae0e80e..11f73e761 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -23,6 +23,9 @@ so-soc: - ipv4_address: {{ DOCKER.containers['so-soc'].ip }} - binds: - /nsm/rules:/nsm/rules:rw #Need to tighten this up? 
+ - /opt/so/rules/yara:/opt/sensoroni/yara:rw + - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw + - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/uploads:/nsm/soc/uploads:rw - /opt/so/log/soc/:/opt/sensoroni/logs/:rw From 2643ae08a75c097abc1de60397f33b72a66655f0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 5 Feb 2024 17:54:30 -0500 Subject: [PATCH 071/777] add append to list --- salt/manager/tools/sbin/so-yaml.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 874fc9e0f..572585df9 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -16,12 +16,14 @@ lockFile = "/tmp/so-yaml.lock" def showUsage(args): print('Usage: {} [ARGS...]'.format(sys.argv[0])) print(' General commands:') + print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.') print(' remove - Removes a yaml key, if it exists. Requires KEY arg.') print(' help - Prints this usage information.') print('') print(' Where:') print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml') print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2') + print(' LISTITEM - Item to add to the list.') sys.exit(1) @@ -35,6 +37,32 @@ def writeYaml(filename, content): file = open(filename, "w") return yaml.dump(content, file) +def appendItem(content, key, listItem): + pieces = key.split(".", 1) + if len(pieces) > 1: + appendItem(content[pieces[0]], pieces[1], listItem) + else: + try: + content[key].append(listItem) + except TypeError: + print("The contents key provided is likely not a list. 
No action was taken on the file.") + return 1 + +def append(args): + if len(args) != 3: + print('Missing filename, key arg, or list item to append', file=sys.stderr) + showUsage(None) + return + + filename = args[0] + key = args[1] + listItem = args[2] + + content = loadYaml(filename) + appendItem(content, key, listItem) + writeYaml(filename, content) + + return 0 def removeKey(content, key): pieces = key.split(".", 1) @@ -69,6 +97,7 @@ def main(): commands = { "help": showUsage, + "append": append, "remove": remove, } From 9d62ade32e72f10e058428c1e441b0de225d8527 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 6 Feb 2024 11:14:27 -0500 Subject: [PATCH 072/777] update so-yaml tests --- salt/manager/tools/sbin/so-yaml.py | 2 +- salt/manager/tools/sbin/so-yaml_test.py | 44 +++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 572585df9..9f4bdbb98 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -45,7 +45,7 @@ def appendItem(content, key, listItem): try: content[key].append(listItem) except TypeError: - print("The contents key provided is likely not a list. No action was taken on the file.") + print("The key provided is likely not a list. 
No action was taken on the file.") return 1 def append(args): diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index 7d0ed1a8e..f70314ba3 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -105,3 +105,47 @@ class TestRemove(unittest.TestCase): self.assertEqual(actual, expected) sysmock.assert_called_once_with(1) self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + + def test_append(self): + filename = "/tmp/so-yaml_test-remove.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}") + file.close() + + soyaml.append([filename, "key3", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\n- d\n" + self.assertEqual(actual, expected) + + def test_append_nested(self): + filename = "/tmp/so-yaml_test-remove.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.append([filename, "key1.child2", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_append_nested_deep(self): + filename = "/tmp/so-yaml_test-remove.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.append([filename, "key1.child2.deep2", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) From 7106095128e643a4e50891c4631a406c35479cc8 Mon Sep 17 00:00:00 2001 From: Doug 
Burks Date: Tue, 6 Feb 2024 15:39:23 -0500 Subject: [PATCH 073/777] FEATURE: Improve Correlate and Hunt actions on SOC Actions menu #12315 --- salt/soc/defaults.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index c1b9470c8..86bb57c49 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -9,7 +9,7 @@ soc: icon: fa-crosshairs target: links: - - '/#/hunt?q="{value|escape}" | groupby event.module* event.dataset' + - '/#/hunt?q="{value|escape}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' - name: actionAddToCase description: actionAddToCaseHelp icon: fa-briefcase @@ -23,13 +23,13 @@ soc: icon: fab fa-searchengin target: '' links: - - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset' - - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* event.dataset' - - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* event.dataset' - - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset' - - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* event.dataset' - - '/#/hunt?q="{:log.id.uid}" | groupby event.module* event.dataset' - - '/#/hunt?q="{:network.community_id}" | groupby event.module* event.dataset' + - 
'/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message 
notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - '/#/hunt?q="{:log.id.uid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby 
http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - '/#/hunt?q="{:network.community_id}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' - name: actionPcap description: actionPcapHelp icon: fa-stream From b3f61536670050996fd539444863aa95a203a2b9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 6 Feb 2024 16:15:54 -0500 Subject: [PATCH 074/777] update so-yaml tests --- salt/manager/tools/sbin/so-yaml.py | 7 +++- salt/manager/tools/sbin/so-yaml_test.py | 52 +++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 9f4bdbb98..24d520891 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -44,8 +44,11 @@ def appendItem(content, key, listItem): else: try: content[key].append(listItem) - except TypeError: - print("The key provided is likely not a list. No action was taken on the file.") + except AttributeError: + print("The key provided is not a list. No action was taken on the file.") + return 1 + except KeyError: + print("The key provided does not exist. 
No action was taken on the file.") return 1 def append(args): diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index f70314ba3..59db43860 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -149,3 +149,55 @@ class TestRemove(unittest.TestCase): expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n" self.assertEqual(actual, expected) + + def test_append_key_noexist(self): + filename = "/tmp/so-yaml_test-append.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}") + file.close() + + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stdout', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "append", filename, "key4", "h"] + soyaml.main() + sysmock.assert_called() + self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n") + + def test_append_key_noexist_deep(self): + filename = "/tmp/so-yaml_test-append.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}") + file.close() + + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stdout', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"] + soyaml.main() + sysmock.assert_called() + self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. 
No action was taken on the file.\n") + + def test_append_key_nonlist(self): + filename = "/tmp/so-yaml_test-append.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}") + file.close() + + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stdout', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "append", filename, "key1", "h"] + soyaml.main() + sysmock.assert_called() + self.assertEqual(mock_stdout.getvalue(), "The key provided is not a list. No action was taken on the file.\n") + + def test_append_key_nonlist_deep(self): + filename = "/tmp/so-yaml_test-append.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}") + file.close() + + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stdout', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"] + soyaml.main() + sysmock.assert_called() + self.assertEqual(mock_stdout.getvalue(), "The key provided is not a list. No action was taken on the file.\n") From 24fd3ef8cc40a4b0f3e7083ae935171079de11f8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 6 Feb 2024 16:22:13 -0500 Subject: [PATCH 075/777] uopdate error message --- salt/manager/tools/sbin/so-yaml.py | 2 +- salt/manager/tools/sbin/so-yaml_test.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 24d520891..41cab0b23 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -45,7 +45,7 @@ def appendItem(content, key, listItem): try: content[key].append(listItem) except AttributeError: - print("The key provided is not a list. No action was taken on the file.") + print("The existing value for the given key is not a list. 
No action was taken on the file.") return 1 except KeyError: print("The key provided does not exist. No action was taken on the file.") diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index 59db43860..488877ea1 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -187,7 +187,7 @@ class TestRemove(unittest.TestCase): sys.argv = ["cmd", "append", filename, "key1", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The key provided is not a list. No action was taken on the file.\n") + self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n") def test_append_key_nonlist_deep(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -200,4 +200,4 @@ class TestRemove(unittest.TestCase): sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The key provided is not a list. No action was taken on the file.\n") + self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. 
No action was taken on the file.\n") From f97d0f2f3644b298ef14c313fd9e94c39baa9573 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 7 Feb 2024 09:25:56 -0500 Subject: [PATCH 076/777] add /opt/so/rules/ to files_roots --- files/salt/master/master | 1 + salt/manager/tools/sbin/soup | 10 ++++++++++ salt/suricata/config.sls | 4 +++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/files/salt/master/master b/files/salt/master/master index b93fa93de..e309a560b 100644 --- a/files/salt/master/master +++ b/files/salt/master/master @@ -41,6 +41,7 @@ file_roots: base: - /opt/so/saltstack/local/salt - /opt/so/saltstack/default/salt + - /opt/so/rules # The master_roots setting configures a master-only copy of the file_roots dictionary, diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a250116d1..6f086469a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -594,6 +594,16 @@ up_to_2.4.50() { touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls + # the file_roots need to be update due to salt 3006.6 upgrade not allowing symlinks outside the file_roots + # put new so-yaml in place + echo "Updating so-yaml" + \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/" + \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/ + echo "Creating a backup of the salt-master config." 
+ cp -v /etc/salt/master "/etc/salt/master.so-$INSTALLEDVERSION" + echo "Adding /opt/so/rules to file_roots using so-yaml" + so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules + INSTALLEDVERSION=2.4.50 } diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 8d5279349..4804565ce 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -84,10 +84,12 @@ suridatadir: - mode: 770 - makedirs: True +# salt:// would resolve to /opt/so/rules because of the defined file_roots and +# nids not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt surirulesync: file.recurse: - name: /opt/so/conf/suricata/rules/ - - source: salt://suricata/rules/ + - source: salt://nids/ - user: 940 - group: 940 - show_changes: False From e42e07b245d32dd2bdb1d54e807d478209506efa Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 7 Feb 2024 13:05:45 -0500 Subject: [PATCH 077/777] update salt mine after salt-master restarts --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 6f086469a..b4647bdbc 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -947,9 +947,6 @@ main() { systemctl_func "stop" "$cron_service_name" - # update mine items prior to stopping salt-minion and salt-master - update_salt_mine - echo "Updating dockers to $NEWVERSION." 
if [[ $is_airgap -eq 0 ]]; then airgap_update_dockers @@ -1025,6 +1022,9 @@ main() { salt-call state.apply salt.minion -l info queue=True echo "" + # ensure the mine is updated and populated before highstates run, following the salt-master restart + update_salt_mine + enable_highstate echo "" From 6534f392a9334a2dd1f1b89b9961d116d5c44de7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 7 Feb 2024 14:25:28 -0500 Subject: [PATCH 078/777] update backup filename --- salt/manager/tools/sbin/soup | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index b4647bdbc..5bade9891 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -600,8 +600,11 @@ up_to_2.4.50() { \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/" \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/ echo "Creating a backup of the salt-master config." - cp -v /etc/salt/master "/etc/salt/master.so-$INSTALLEDVERSION" - echo "Adding /opt/so/rules to file_roots using so-yaml" + # INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to have the version + # so was at prior to starting upgrade. use POSTVERSION here since it doesnt change until + # post upgrade changes. 
POSTVERSION set to INSTALLEDVERSION at start of soup + cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak" + echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml" so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules INSTALLEDVERSION=2.4.50 From b7b501d289a749c2d8fd0afe246cba9a2cec551b Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 7 Feb 2024 15:02:52 -0500 Subject: [PATCH 079/777] Add Sigma pipelines --- salt/soc/final_sigma_pipeline.yaml | 7 +++++++ salt/soc/so_sigma_pipeline.yaml | 18 ++++++++++++++++++ salt/soc/soc_soc.yaml | 13 +++++++++++++ 3 files changed, 38 insertions(+) create mode 100644 salt/soc/final_sigma_pipeline.yaml create mode 100644 salt/soc/so_sigma_pipeline.yaml diff --git a/salt/soc/final_sigma_pipeline.yaml b/salt/soc/final_sigma_pipeline.yaml new file mode 100644 index 000000000..656bfbb3e --- /dev/null +++ b/salt/soc/final_sigma_pipeline.yaml @@ -0,0 +1,7 @@ +name: Security Onion - Final Pipeline +priority: 95 +transformations: + - id: override_field_name_mapping + type: field_name_mapping + mapping: + FieldNameToOverride: NewFieldName diff --git a/salt/soc/so_sigma_pipeline.yaml b/salt/soc/so_sigma_pipeline.yaml new file mode 100644 index 000000000..a1c4d6d62 --- /dev/null +++ b/salt/soc/so_sigma_pipeline.yaml @@ -0,0 +1,18 @@ +name: Security Onion Baseline Pipeline +priority: 90 +transformations: + - id: baseline_field_name_mapping + type: field_name_mapping + mapping: + cs-method: http.method + c-uri: http.uri + c-useragent: http.useragent + cs-version: http.version + uid: user.uid + sid: rule.uuid + answer: answers + query: dns.query.name + src_ip: destination.ip.keyword + src_port: source.port + dst_ip: destination.ip.keyword + dst_port: destination.port \ No newline at end of file diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 0dd39620b..fe672fe3e 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -32,6 +32,14 @@ soc: global: True advanced: True 
helpLink: soc-customization.html + final_sigma_pipeline__yaml: + title: Final Sigma Pipeline + description: Final Processing Pipeline for Sigma Rules + syntax: yaml + file: True + global: True + advanced: True + helpLink: soc-customization.html config: licenseKey: title: License Key @@ -62,6 +70,11 @@ soc: global: True advanced: True modules: + elastalertengine: + sigmaRulePackages: + description: 'One of the following: core | core+ | core++ | all' + global: True + advanced: False elastic: index: description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records. From 7e3187c0b8f4bba0c992dbf7719a661bc9924735 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 7 Feb 2024 15:35:31 -0500 Subject: [PATCH 080/777] Fixup sigma pipelines --- salt/soc/config.sls | 16 ++++++++++++++++ salt/soc/enabled.sls | 2 ++ .../{ => files/soc}/final_sigma_pipeline.yaml | 0 salt/soc/{ => files/soc}/so_sigma_pipeline.yaml | 0 salt/soc/soc_soc.yaml | 2 +- 5 files changed, 19 insertions(+), 1 deletion(-) rename salt/soc/{ => files/soc}/final_sigma_pipeline.yaml (100%) rename salt/soc/{ => files/soc}/so_sigma_pipeline.yaml (100%) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 95135566b..549bf94cf 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -57,6 +57,22 @@ socmotd: - mode: 600 - template: jinja +socsigmafinalpipeline: + file.managed: + - name: /opt/so/conf/soc/sigma_final_pipeline.yaml + - source: salt://soc/files/soc/sigma_final_pipeline.yaml + - user: 939 + - group: 939 + - mode: 600 + +socsigmasopipeline: + file.managed: + - name: /opt/so/conf/soc/sigma_so_pipeline.yaml + - source: salt://soc/files/soc/sigma_so_pipeline.yaml + - user: 939 + - group: 939 + - mode: 600 + socbanner: file.managed: - name: /opt/so/conf/soc/banner.md diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 11f73e761..535423179 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -32,6 +32,8 @@ so-soc: - 
/opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro - /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro - /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro + - /opt/so/conf/soc/sigma_so_pipeline.yaml:/opt/sensoroni/sigma_so_pipeline.yaml:ro + - /opt/so/conf/soc/sigma_final_pipeline.yaml:/opt/sensoroni/sigma_final_pipeline.yaml:rw - /opt/so/conf/soc/custom.js:/opt/sensoroni/html/js/custom.js:ro - /opt/so/conf/soc/custom_roles:/opt/sensoroni/rbac/custom_roles:ro - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw diff --git a/salt/soc/final_sigma_pipeline.yaml b/salt/soc/files/soc/final_sigma_pipeline.yaml similarity index 100% rename from salt/soc/final_sigma_pipeline.yaml rename to salt/soc/files/soc/final_sigma_pipeline.yaml diff --git a/salt/soc/so_sigma_pipeline.yaml b/salt/soc/files/soc/so_sigma_pipeline.yaml similarity index 100% rename from salt/soc/so_sigma_pipeline.yaml rename to salt/soc/files/soc/so_sigma_pipeline.yaml diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index fe672fe3e..f413b5c73 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -32,7 +32,7 @@ soc: global: True advanced: True helpLink: soc-customization.html - final_sigma_pipeline__yaml: + sigma_final_pipeline__yaml: title: Final Sigma Pipeline description: Final Processing Pipeline for Sigma Rules syntax: yaml From d3d2305f00d90a2e076a8d377fcd9e72ea97a229 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 7 Feb 2024 16:08:27 -0500 Subject: [PATCH 081/777] FEATURE: Add new dashboards for community_id and firewall auth #12323 --- salt/soc/defaults.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 86bb57c49..31b6eb588 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1424,8 +1424,11 @@ soc: - name: Zeek Notice description: Zeek notice logs query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip 
| groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - - name: Connections - description: Network connection metadata + - name: Connections and Metadata with community_id + description: Network connections that include community_id + query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - name: Connections seen by Zeek or Suricata + description: Network connections logged by Zeek or Suricata query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui' - name: DCE_RPC description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata @@ -1562,6 +1565,9 @@ soc: - name: Firewall description: Firewall logs query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | 
groupby destination.port' + - name: Firewall Auth + description: Firewall authentication logs + query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message' - name: VLAN description: VLAN (Virtual Local Area Network) tagged logs query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' From 81a3e95914d602386a11e2f5289a98fe59f97ece Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 7 Feb 2024 16:42:16 -0500 Subject: [PATCH 082/777] Fixup sigma pipelines --- .../soc/{final_sigma_pipeline.yaml => sigma_final_pipeline.yaml} | 0 .../files/soc/{so_sigma_pipeline.yaml => sigma_so_pipeline.yaml} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename salt/soc/files/soc/{final_sigma_pipeline.yaml => sigma_final_pipeline.yaml} (100%) rename salt/soc/files/soc/{so_sigma_pipeline.yaml => sigma_so_pipeline.yaml} (100%) diff --git a/salt/soc/files/soc/final_sigma_pipeline.yaml b/salt/soc/files/soc/sigma_final_pipeline.yaml similarity index 100% rename from salt/soc/files/soc/final_sigma_pipeline.yaml rename to salt/soc/files/soc/sigma_final_pipeline.yaml diff --git a/salt/soc/files/soc/so_sigma_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml similarity index 100% rename from salt/soc/files/soc/so_sigma_pipeline.yaml rename to salt/soc/files/soc/sigma_so_pipeline.yaml From 8d0e8789bd949932df12397e3edafd4bfb4dd9e2 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 8 Feb 2024 09:54:51 -0500 Subject: [PATCH 083/777] Use salt file roots --- salt/elasticfleet/enabled.sls | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/salt/elasticfleet/enabled.sls 
b/salt/elasticfleet/enabled.sls index a84d51cfc..53ec6c1ef 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -45,12 +45,6 @@ so-elastic-fleet-auto-configure-artifact-urls: - name: /usr/sbin/so-elastic-fleet-artifacts-url-update - retry: True -elasticagentartifactssymlink: - file.symlink: - - name: /opt/so/saltstack/local/salt/beats - - target: /nsm/elastic-fleet/artifacts/beats - - user: socore - - group: socore {% endif %} # Sync Elastic Agent artifacts to Fleet Node @@ -58,7 +52,7 @@ elasticagentartifactssymlink: elasticagent_syncartifacts: file.recurse: - name: /nsm/elastic-fleet/artifacts/beats - - source: salt://beats + - source: salt://beats?saltenv=elasticartifacts {% endif %} {% if SERVICETOKEN != '' %} From 29174566f3f7a8307a92eaac2073f108eddedc57 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 8 Feb 2024 09:44:56 -0700 Subject: [PATCH 084/777] WIP: Updated Detection Mappings, Changed Engine to Language Detection mappings updated to include the removal of Note and the addition of Tags, Ruleset, and Language. SOC defaults updated to use language based queries rather than engine and show the language column instead of the engine column in results. 
--- .../component/so/detection-mappings.json | 10 +++++++++- salt/soc/defaults.yaml | 18 +++++++++--------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json index df53308f2..596236703 100644 --- a/salt/elasticsearch/templates/component/so/detection-mappings.json +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -47,13 +47,21 @@ "isCommunity": { "type": "boolean" }, - "note": { + "tags": { "type": "text" }, + "ruleset": { + "ignore_above": 1024, + "type": "keyword" + }, "engine": { "ignore_above": 1024, "type": "keyword" }, + "language": { + "ignore_above": 1024, + "type": "keyword" + }, "overrides": { "properties": { "type": { diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 6811529bf..cc6b417e6 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1769,7 +1769,7 @@ soc: default: - so_detection.title - so_detection.isEnabled - - so_detection.engine + - so_detection.language - "@timestamp" queries: - name: "All Detections" @@ -1781,11 +1781,11 @@ soc: - name: "Disabled" query: "so_detection.isEnabled:false" - name: "Suricata" - query: "so_detection.engine:suricata" - - name: "ElastAlert" - query: "so_detection.engine:elastalert" - - name: "Strelka" - query: "so_detection.engine:strelka" + query: "so_detection.language:suricata" + - name: "Sigma" + query: "so_detection.language:sigma" + - name: "Yara" + query: "so_detection.language:yara" detection: presets: severity: @@ -1797,12 +1797,12 @@ soc: - medium - high - critical - engine: + language: customEnabled: false labels: - suricata - - elastalert - - strelka + - sigma + - yara severityTranslations: minor: low major: high From 683abf0179701d7abfedbfd6d6d08d9227f8b949 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 8 Feb 2024 13:24:25 -0500 Subject: [PATCH 085/777] Rework naming --- 
.../sbin_jinja/so-elastic-fleet-artifacts-url-update | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update index ffbeaf6e1..721525668 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-artifacts-url-update @@ -35,7 +35,9 @@ declare -a NEW_LIST=() if grep -q "fleet" <<< "$LOGSTASHNODES"; then readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES") for NODE in "${FLEETNODES[@]}"; do - NEW_LIST+=("http://$NODE:8443/artifacts/") + URL="http://$NODE:8443/artifacts/" + NAME="FleetServer_$NODE" + NEW_LIST+=("$URL=$NAME") done fi @@ -46,8 +48,11 @@ declare -A expected_urls=( ) # Merge NEW_LIST into expected_urls -for host in "${NEW_LIST[@]}"; do - expected_urls[$host]="FleetServer" +for entry in "${NEW_LIST[@]}"; do + # Extract URL and Name from each entry + IFS='=' read -r URL NAME <<< "$entry" + # Add to expected_urls, automatically handling URL as key and NAME as value + expected_urls["$URL"]="$NAME" done # Fetch the current hosts from the API From 3c9d6da1d8a81472aff15b56b1d2f75ce539acb9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 8 Feb 2024 22:05:37 -0500 Subject: [PATCH 086/777] add putty to sod packages.sls Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/desktop/packages.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/desktop/packages.sls b/salt/desktop/packages.sls index 9a7d53317..b2a028e60 100644 --- a/salt/desktop/packages.sls +++ b/salt/desktop/packages.sls @@ -334,6 +334,7 @@ desktop_packages: - pulseaudio-libs - pulseaudio-libs-glib2 - pulseaudio-utils + - putty - sane-airscan - sane-backends - sane-backends-drivers-cameras From 654602bf80fc344eadd21a861073066e260412e6 Mon Sep 17 
00:00:00 2001 From: Josh Brower Date: Fri, 9 Feb 2024 09:30:18 -0500 Subject: [PATCH 087/777] Fixup shell --- .../elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade | 2 ++ .../tools/sbin_jinja/so-elastic-fleet-es-url-update | 2 ++ .../tools/sbin_jinja/so-elastic-fleet-outputs-update | 2 ++ salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update | 2 ++ 4 files changed, 8 insertions(+) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade index b1ca8c476..b911f5896 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade @@ -1,3 +1,5 @@ +#!/bin/bash + # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update index 5d5b7e7e0..3da6b3e78 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update @@ -1,3 +1,5 @@ +#!/bin/bash + # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. 
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 23a206921..eb5ccc1ed 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -1,3 +1,5 @@ +#!/bin/bash + # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update index 31c7becca..5f7637cd3 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update @@ -1,3 +1,5 @@ +#!/bin/bash + # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. 
From 5903ae596cd3b322b79b86104c3fe0bb26d69cf3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 9 Feb 2024 09:47:23 -0500 Subject: [PATCH 088/777] move suricata rules to /opt/so/rules/nids/suri --- files/salt/master/master | 2 +- salt/idstools/enabled.sls | 2 +- salt/idstools/etc/rulecat.conf | 8 ++++---- salt/idstools/sync_files.sls | 4 ++-- salt/manager/tools/sbin/soup | 4 +++- salt/suricata/config.sls | 6 +++--- salt/suricata/manager.sls | 4 ++-- 7 files changed, 16 insertions(+), 14 deletions(-) diff --git a/files/salt/master/master b/files/salt/master/master index e309a560b..6cf3b94ea 100644 --- a/files/salt/master/master +++ b/files/salt/master/master @@ -41,7 +41,7 @@ file_roots: base: - /opt/so/saltstack/local/salt - /opt/so/saltstack/default/salt - - /opt/so/rules + - /opt/so/rules/nids # The master_roots setting configures a master-only copy of the file_roots dictionary, diff --git a/salt/idstools/enabled.sls b/salt/idstools/enabled.sls index decc5a5b2..5e4c4c066 100644 --- a/salt/idstools/enabled.sls +++ b/salt/idstools/enabled.sls @@ -39,7 +39,7 @@ so-idstools: {% endif %} - binds: - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro - - /opt/so/rules/nids:/opt/so/rules/nids:rw + - /opt/so/rules/nids/suri:/opt/so/rules/nids/suri:rw - /nsm/rules/:/nsm/rules/:rw {% if DOCKER.containers['so-idstools'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %} diff --git a/salt/idstools/etc/rulecat.conf b/salt/idstools/etc/rulecat.conf index d6f3d93d8..f7c784413 100644 --- a/salt/idstools/etc/rulecat.conf +++ b/salt/idstools/etc/rulecat.conf @@ -1,10 +1,10 @@ {%- from 'vars/globals.map.jinja' import GLOBALS -%} {%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%} ---merged=/opt/so/rules/nids/all.rules ---local=/opt/so/rules/nids/local.rules +--merged=/opt/so/rules/nids/suri/all.rules +--local=/opt/so/rules/nids/suri/local.rules {%- if GLOBALS.md_engine == "SURICATA" %} ---local=/opt/so/rules/nids/extraction.rules 
---local=/opt/so/rules/nids/filters.rules +--local=/opt/so/rules/nids/suri/extraction.rules +--local=/opt/so/rules/nids/suri/filters.rules {%- endif %} --url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules --disable=/opt/so/idstools/etc/disable.conf diff --git a/salt/idstools/sync_files.sls b/salt/idstools/sync_files.sls index 64479e937..cdacfaa74 100644 --- a/salt/idstools/sync_files.sls +++ b/salt/idstools/sync_files.sls @@ -21,7 +21,7 @@ idstoolsetcsync: rulesdir: file.directory: - - name: /opt/so/rules/nids + - name: /opt/so/rules/nids/suri - user: 939 - group: 939 - makedirs: True @@ -29,7 +29,7 @@ rulesdir: # Don't show changes because all.rules can be large synclocalnidsrules: file.recurse: - - name: /opt/so/rules/nids/ + - name: /opt/so/rules/nids/suri/ - source: salt://idstools/rules/ - user: 939 - group: 939 diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 5bade9891..02c7c01e0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -605,7 +605,9 @@ up_to_2.4.50() { # post upgrade changes. POSTVERSION set to INSTALLEDVERSION at start of soup cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak" echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml" - so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules + so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids + echo "Moving Suricata rules" + mv -v /opt/so/rules/nids/* /opt/so/rules/nids/suri/. 
INSTALLEDVERSION=2.4.50 } diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 4804565ce..e0a157a85 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -84,12 +84,12 @@ suridatadir: - mode: 770 - makedirs: True -# salt:// would resolve to /opt/so/rules because of the defined file_roots and -# nids not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt +# salt:// would resolve to /opt/so/rules/nids because of the defined file_roots and +# not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt surirulesync: file.recurse: - name: /opt/so/conf/suricata/rules/ - - source: salt://nids/ + - source: salt://suricata/ - user: 940 - group: 940 - show_changes: False diff --git a/salt/suricata/manager.sls b/salt/suricata/manager.sls index c196c5cae..3d5183556 100644 --- a/salt/suricata/manager.sls +++ b/salt/suricata/manager.sls @@ -13,7 +13,7 @@ ruleslink: - name: /opt/so/saltstack/local/salt/suricata/rules - user: socore - group: socore - - target: /opt/so/rules/nids + - target: /opt/so/rules/nids/suri refresh_salt_master_fileserver_suricata_ruleslink: salt.runner: @@ -27,4 +27,4 @@ refresh_salt_master_fileserver_suricata_ruleslink: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} From 2143881c0b560792e696cab50429b979822d1bac Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 9 Feb 2024 10:22:25 -0500 Subject: [PATCH 089/777] specify *.rules --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 02c7c01e0..47d5addb5 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -607,7 +607,7 @@ up_to_2.4.50() { echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml" so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids echo "Moving Suricata rules" - mv -v 
/opt/so/rules/nids/* /opt/so/rules/nids/suri/. + mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/. INSTALLEDVERSION=2.4.50 } From 213ac822a849defadac85c47e91f9114266b95ca Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 9 Feb 2024 10:54:07 -0500 Subject: [PATCH 090/777] create dir and chown --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 47d5addb5..d93218db4 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -607,6 +607,8 @@ up_to_2.4.50() { echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml" so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids echo "Moving Suricata rules" + mkdir /opt/so/rules/nids/suri + chown socore:socore /opt/so/rules/nids/suri mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/. INSTALLEDVERSION=2.4.50 From 304ae49251b6dd99b03785d6ea5c48c0d8c5d63f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 9 Feb 2024 12:41:23 -0500 Subject: [PATCH 091/777] fix source --- salt/suricata/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index e0a157a85..3ec1324bf 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -89,7 +89,7 @@ suridatadir: surirulesync: file.recurse: - name: /opt/so/conf/suricata/rules/ - - source: salt://suricata/ + - source: salt://suri/ - user: 940 - group: 940 - show_changes: False From 64f6d0fba93180f0627b56b5adeb822afa8c5193 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 9 Feb 2024 14:20:07 -0700 Subject: [PATCH 092/777] Updated Detection's ES Mappings Detection's now have a License field and the Comment model is defined now. 
--- .../component/so/detection-mappings.json | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json index 596236703..4efd2f73f 100644 --- a/salt/elasticsearch/templates/component/so/detection-mappings.json +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -62,6 +62,10 @@ "ignore_above": 1024, "type": "keyword" }, + "license": { + "ignore_above": 1024, + "type": "keyword" + }, "overrides": { "properties": { "type": { @@ -106,6 +110,24 @@ } } } + }, + "so_comment": { + "properties": { + "createTime": { + "type": "date" + }, + "detectionId": { + "ignore_above": 1024, + "type": "keyword" + }, + "description": { + "type": "text" + }, + "userId": { + "ignore_above": 1024, + "type": "keyword" + } + } } } } From 66ac36a9440682efdea72d4018675b6252238f9c Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Sat, 10 Feb 2024 11:07:26 -0500 Subject: [PATCH 093/777] Update soup --- salt/manager/tools/sbin/soup | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index d93218db4..600cb5d4e 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -610,6 +610,9 @@ up_to_2.4.50() { mkdir /opt/so/rules/nids/suri chown socore:socore /opt/so/rules/nids/suri mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/. 
+ + echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml" + so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts INSTALLEDVERSION=2.4.50 } From eafb5cf15eed65c0ff2050b431ccceb0e0761f46 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Sun, 11 Feb 2024 13:18:20 -0500 Subject: [PATCH 094/777] Change to file_root --- salt/elasticfleet/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index 53ec6c1ef..5b0cff5df 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -52,7 +52,7 @@ so-elastic-fleet-auto-configure-artifact-urls: elasticagent_syncartifacts: file.recurse: - name: /nsm/elastic-fleet/artifacts/beats - - source: salt://beats?saltenv=elasticartifacts + - source: salt://beats {% endif %} {% if SERVICETOKEN != '' %} From cc0f25a4f7f400759a7a3845c3a3fbb4a8bdc26e Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Sun, 11 Feb 2024 13:30:20 -0500 Subject: [PATCH 095/777] Wait for ES to be ready --- salt/elasticfleet/enabled.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index fef85d24c..31c4e3469 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -17,6 +17,11 @@ include: - elasticfleet.sostatus - ssl +# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready +wait_for_elasticsearch: + cmd.run: + - name: so-elasticsearch-wait + # If enabled, automatically update Fleet Logstash Outputs {% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %} so-elastic-fleet-auto-configure-logstash-outputs: From 4b697b24061b38f4004c9753c96f538d321164e4 Mon Sep 17 00:00:00 2001 From: Jorge Reyes <94730068+reyesj2@users.noreply.github.com> Date: Mon, 12 Feb 2024 09:28:48 -0500 Subject: [PATCH 
096/777] Remove unused file --- salt/stig/license.sls | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 salt/stig/license.sls diff --git a/salt/stig/license.sls b/salt/stig/license.sls deleted file mode 100644 index e69de29bb..000000000 From 5a4e11b2f8f4182330f49ad9b290163e503216c6 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 12 Feb 2024 16:09:47 -0500 Subject: [PATCH 097/777] Update soup Remove a function that isn't used any more --- salt/manager/tools/sbin/soup | 61 ------------------------------------ 1 file changed, 61 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 600cb5d4e..b572610ec 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -247,67 +247,6 @@ check_sudoers() { fi } -check_log_size_limit() { - local num_minion_pillars - num_minion_pillars=$(find /opt/so/saltstack/local/pillar/minions/ -type f | wc -l) - - if [[ $num_minion_pillars -gt 1 ]]; then - if find /opt/so/saltstack/local/pillar/minions/ -type f | grep -q "_heavynode"; then - lsl_msg='distributed' - fi - else - local minion_id - minion_id=$(lookup_salt_value "id" "" "grains" "" "local") - - local minion_arr - IFS='_' read -ra minion_arr <<< "$minion_id" - - local node_type="${minion_arr[0]}" - - local current_limit - # since it is possible for the salt-master service to be stopped when this is run, we need to check the pillar values locally - # we need to combine default local and default pillars before doing this so we can define --pillar-root in salt-call - local epoch_date=$(date +%s%N) - mkdir -vp /opt/so/saltstack/soup_tmp_${epoch_date}/ - cp -r /opt/so/saltstack/default/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/ - # use \cp here to overwrite any pillar files from default with those in local for the tmp directory - \cp -r /opt/so/saltstack/local/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/ - current_limit=$(salt-call pillar.get elasticsearch:log_size_limit --local 
--pillar-root=/opt/so/saltstack/soup_tmp_${epoch_date}/pillar --out=newline_values_only) - rm -rf /opt/so/saltstack/soup_tmp_${epoch_date}/ - - local percent - case $node_type in - 'standalone' | 'eval') - percent=50 - ;; - *) - percent=80 - ;; - esac - - local disk_dir="/" - if [ -d /nsm ]; then - disk_dir="/nsm" - fi - - local disk_size_1k - disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}') - - local ratio="1048576" - - local disk_size_gb - disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' ) - - local new_limit - new_limit=$( echo "$disk_size_gb" "$percent" | awk '{printf("%.0f", $1 * ($2/100))}') - - if [[ $current_limit != "$new_limit" ]]; then - lsl_msg='single-node' - lsl_details=( "$current_limit" "$new_limit" "$minion_id" ) - fi - fi -} - check_os_updates() { # Check to see if there are OS updates echo "Checking for OS updates." From 510226944077b5ac460869d2eced99c0438ba169 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 12 Feb 2024 16:44:54 -0500 Subject: [PATCH 098/777] Update defaults --- salt/idstools/soc_idstools.yaml | 2 +- salt/soc/defaults.yaml | 2 +- salt/soc/soc_soc.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index 634f68803..7cf11dba4 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -8,7 +8,7 @@ idstools: global: True helpLink: rules.html ruleset: - description: Defines the ruleset you want to run. Options are ETOPEN or ETPRO. + description: Defines the ruleset you want to run. Options are ETOPEN or ETPRO. -- WARNING -- Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides.' 
global: True regex: ETPRO\b|ETOPEN\b helpLink: rules.html diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index cc6b417e6..fd2eaf8c0 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1050,7 +1050,7 @@ soc: - rbac/users_roles strelkaengine: compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py - reposFolder: /nsm/rules/strelka/repos + reposFolder: /nsm/rules/yara/repos rulesRepos: - https://github.com/Security-Onion-Solutions/securityonion-yara yaraRulesFolder: /opt/sensoroni/yara diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index f413b5c73..fe0458820 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -72,7 +72,7 @@ soc: modules: elastalertengine: sigmaRulePackages: - description: 'One of the following: core | core+ | core++ | all' + description: 'Defines the Sigma Community Ruleset you want to run: core | core+ | core++ | all. -- WARNING -- Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides.' 
global: True advanced: False elastic: From ccb14485a3028b326bb26f78b7341bd312fc1059 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 12 Feb 2024 19:06:19 -0500 Subject: [PATCH 099/777] Fix conflicting id --- salt/elasticfleet/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index 4fc738171..50290bf09 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -18,7 +18,7 @@ include: - ssl # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready -wait_for_elasticsearch: +wait_for_elasticsearch_elasticfleet: cmd.run: - name: so-elasticsearch-wait From 20d2f3b97e6170fb3216e6cfece9f9740582f5ba Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 12 Feb 2024 19:13:32 -0500 Subject: [PATCH 100/777] Update Sublime action in defaults.yaml to use i18n --- salt/soc/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 31b6eb588..341bee64a 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -59,8 +59,8 @@ soc: target: _blank links: - 'https://www.virustotal.com/gui/search/{value}' - - name: Sublime Platform Email Review - description: Review email in Sublime Platform + - name: actionSublime + description: actionSublimeHelp icon: fa-external-link-alt target: _blank links: From 0ad39a7e327131c533958526fa3ed96f71565293 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 12 Feb 2024 19:18:29 -0500 Subject: [PATCH 101/777] FEATURE: Add new SOC action to show process ancestry #12345 --- salt/soc/defaults.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 341bee64a..7573854c6 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -65,6 +65,12 @@ soc: target: _blank links: - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' + - name: 
actionProcessAncestors + description: actionProcessAncestorsHelp + icon: fa-people-roof + target: '' + links: + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.module event.dataset | table soc_timestamp event.dataset user.name process.executable process.command_line process.working_directory' eventFields: default: - soc_timestamp From ea80469c2db1bc690e26e4a7e5cf5c1afd44bc3d Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 12 Feb 2024 19:39:55 -0500 Subject: [PATCH 102/777] Detection Default queries --- salt/soc/defaults.yaml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index fd2eaf8c0..29cd7e1ac 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1770,21 +1770,23 @@ soc: - so_detection.title - so_detection.isEnabled - so_detection.language - - "@timestamp" + - so_detection.severity queries: - name: "All Detections" query: "_id:*" - - name: "Local Rules" + - name: "Custom Detections" query: "so_detection.isCommunity:false" - - name: "Enabled" + - name: "All Detections - Enabled" query: "so_detection.isEnabled:true" - - name: "Disabled" + - name: "All Detections - Disabled" query: "so_detection.isEnabled:false" - - name: "Suricata" + - name: "Detection Type - Suricata (NIDS)" query: "so_detection.language:suricata" - - name: "Sigma" + - name: "Detection Type - Sigma - All" query: "so_detection.language:sigma" - - name: "Yara" + - name: "Detection Type - Sigma - Windows" + query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"' + - name: "Detection Type - Yara (Strelka)" query: "so_detection.language:yara" detection: presets: From 3efaba11041df34e3bb74762241e013229c4238e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 11:04:26 
-0500 Subject: [PATCH 103/777] modify soup to update soup scripts without using salt --- salt/common/init.sls | 13 ++++++++++++- salt/common/soup_scripts.sls | 23 ----------------------- salt/manager/tools/sbin/soup | 23 ++++++++--------------- 3 files changed, 20 insertions(+), 39 deletions(-) delete mode 100644 salt/common/soup_scripts.sls diff --git a/salt/common/init.sls b/salt/common/init.sls index 5f13c3893..51836daf6 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -4,7 +4,6 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} include: - - common.soup_scripts - common.packages {% if GLOBALS.role in GLOBALS.manager_roles %} - manager.elasticsearch # needed for elastic_curl_config state @@ -134,6 +133,18 @@ common_sbin_jinja: - file_mode: 755 - template: jinja +{% if not GLOBALS.is_manager%} +# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers +# these two states remove the scripts from non manager nodes +remove_soup: + file.absent: + - name: /usr/sbin/soup + +remove_so-firewall: + file.absent: + - name: /usr/sbin/so-firewall +{% endif %} + so-status_script: file.managed: - name: /usr/sbin/so-status diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls deleted file mode 100644 index 041649200..000000000 --- a/salt/common/soup_scripts.sls +++ /dev/null @@ -1,23 +0,0 @@ -# Sync some Utilities -soup_scripts: - file.recurse: - - name: /usr/sbin - - user: root - - group: root - - file_mode: 755 - - source: salt://common/tools/sbin - - include_pat: - - so-common - - so-image-common - -soup_manager_scripts: - file.recurse: - - name: /usr/sbin - - user: root - - group: root - - file_mode: 755 - - source: salt://manager/tools/sbin - - include_pat: - - so-firewall - - so-repo-sync - - soup diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 600cb5d4e..3254a61dd 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ 
-794,21 +794,14 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/manager/tools/sbin/ - cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ - cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ - cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/manager/tools/sbin/ - salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local - # Verify that soup scripts updated as expected - get_soup_script_hashes - if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then - echo "Succesfully updated soup scripts." - else - # When STIGs are enabled soup scripts will fail to update using --file-root --local. - # After checking that the expected hashes are not present, retry updating soup scripts using salt master. - echo "There was a problem updating soup scripts.. Trying to rerun script update" - salt-call state.apply common.soup_scripts queue=True -linfo - fi + rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup + rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall + + cp $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. + cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. + cp $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. + cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. + echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." 
exit 0 From 92634724c40a97612fb254d0b84c068df2cc5742 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 11:09:08 -0500 Subject: [PATCH 104/777] move rm --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 3254a61dd..02dd1272b 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -794,14 +794,14 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup - rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall - cp $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. cp $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. + rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup + rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall + echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." 
exit 0 From 8060751a667fe6d1b9d910a39ac5f851ac4d691e Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 13 Feb 2024 12:24:33 -0500 Subject: [PATCH 105/777] Add table columns to process dashboard in defaults.yaml --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 7573854c6..ba557d64d 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1417,7 +1417,7 @@ soc: query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint - query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable' + query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.working_directory process.parent.name process.name' - name: Host File Activity description: File activity captured on an endpoint query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | 
groupby file.name | groupby process.executable' From b713771494659b098f16c1d7b0eb37501721cf66 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 12:30:36 -0500 Subject: [PATCH 106/777] add back common soup_scripts state --- salt/common/soup_scripts.sls | 24 ++++++++++++++++++++++++ salt/manager/tools/sbin/soup | 10 ++++++---- 2 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 salt/common/soup_scripts.sls diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls new file mode 100644 index 000000000..402ad9c4a --- /dev/null +++ b/salt/common/soup_scripts.sls @@ -0,0 +1,24 @@ +remove_common_soup: + file.absent: + - name: /opt/so/saltstack/default/salt/common/tools/sbin/soup + +remove_common_so-firewall: + file.absent: + - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall + +# Sync some Utilities +soup_scripts: + file.recurse: + - name: /usr/sbin + - user: root + - group: root + - file_mode: 755 + - source: salt://common/tools/sbin + +soup_manager_scripts: + file.recurse: + - name: /usr/sbin + - user: root + - group: root + - file_mode: 755 + - source: salt://manager/tools/sbin diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 02dd1272b..75be97928 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -795,12 +795,14 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." cp $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. - cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. + #cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. cp $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. - cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. + #cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. 
- rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup - rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall + #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup + #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall + + salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." From 5c9b1ab38b052e15f363daad0e6ffc43ddd8d8fb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 12:48:31 -0500 Subject: [PATCH 107/777] copy with cp --- salt/common/soup_scripts.sls | 27 ++++++++++++--------------- salt/manager/tools/sbin/soup | 4 ++-- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 402ad9c4a..a4fafd6e3 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -6,19 +6,16 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall -# Sync some Utilities -soup_scripts: - file.recurse: - - name: /usr/sbin - - user: root - - group: root - - file_mode: 755 - - source: salt://common/tools/sbin +{% if pillar.global.airgap %} +{% set UPDATE_DIR='/tmp/soagupdate/securityonion'%} +{% else %} +{% set UPDATE_DIR='/tmp/sogh/securityonion'%} +{% endif %} -soup_manager_scripts: - file.recurse: - - name: /usr/sbin - - user: root - - group: root - - file_mode: 755 - - source: salt://manager/tools/sbin +copy_common: + cmd.run: + - name: "\cp " ~ {{ $UPDATE_DIR }} ~ "/salt/common/tools/sbin/* /usr/sbin/." + +copy_manager: + cmd.run: + - name: "\cp " ~ {{ $UPDATE_DIR }} ~ "/salt/manager/tools/sbin/* /usr/sbin/." 
diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 75be97928..2dfad1bbb 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -795,9 +795,9 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." cp $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. - #cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. + cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. cp $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. - #cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. + cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall From d7f853b5b2d48f30518b1629de03fa1bbcbd2d6d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 12:50:22 -0500 Subject: [PATCH 108/777] comment out script copy in soup --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 2dfad1bbb..75be97928 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -795,9 +795,9 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." cp $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. - cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. + #cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. cp $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. - cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. + #cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. 
#rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall From 0741ae370af217155b9075c865715d469e4edfda Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 13 Feb 2024 12:51:26 -0500 Subject: [PATCH 109/777] Update defaults.yaml --- salt/soc/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ba557d64d..244a021d3 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -70,7 +70,7 @@ soc: icon: fa-people-roof target: '' links: - - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.module event.dataset | table soc_timestamp event.dataset user.name process.executable process.command_line process.working_directory' + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.module event.dataset | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory' eventFields: default: - soc_timestamp @@ -1417,7 +1417,7 @@ soc: query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint - query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby 
process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.working_directory process.parent.name process.name' + query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory' - name: Host File Activity description: File activity captured on an endpoint query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable' From 1bde002f20a1a6f58cb78c57be8a70fd94c7689d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 12:51:53 -0500 Subject: [PATCH 110/777] update case --- salt/common/soup_scripts.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index a4fafd6e3..346b63c96 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -7,7 +7,7 @@ remove_common_so-firewall: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall {% if pillar.global.airgap %} -{% set UPDATE_DIR='/tmp/soagupdate/securityonion'%} +{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion'%} {% else %} {% set UPDATE_DIR='/tmp/sogh/securityonion'%} {% endif %} From 9175a7345609a58602d20bc548f9b35f4b2bd497 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 13:08:09 -0500 Subject: [PATCH 111/777] dont need $ 
for vars --- salt/common/soup_scripts.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 346b63c96..4a4fe80c8 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -14,8 +14,8 @@ remove_common_so-firewall: copy_common: cmd.run: - - name: "\cp " ~ {{ $UPDATE_DIR }} ~ "/salt/common/tools/sbin/* /usr/sbin/." + - name: "\cp " ~ {{ UPDATE_DIR }} ~ "/salt/common/tools/sbin/* /usr/sbin/." copy_manager: cmd.run: - - name: "\cp " ~ {{ $UPDATE_DIR }} ~ "/salt/manager/tools/sbin/* /usr/sbin/." + - name: "\cp " ~ {{ UPDATE_DIR }} ~ "/salt/manager/tools/sbin/* /usr/sbin/." From d6ac7a32869c4c1ff84ab762eb261606576e66d2 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 13:31:34 -0500 Subject: [PATCH 112/777] fix the jinja --- salt/common/soup_scripts.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 4a4fe80c8..86042a880 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -6,7 +6,7 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall -{% if pillar.global.airgap %} +{% if salt['pillar.get']('global:airgap') %} {% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion'%} {% else %} {% set UPDATE_DIR='/tmp/sogh/securityonion'%} @@ -14,8 +14,8 @@ remove_common_so-firewall: copy_common: cmd.run: - - name: "\cp " ~ {{ UPDATE_DIR }} ~ "/salt/common/tools/sbin/* /usr/sbin/." + - name: "cp {{UPDATE_DIR}}/salt/common/tools/sbin/* /usr/sbin/." copy_manager: cmd.run: - - name: "\cp " ~ {{ UPDATE_DIR }} ~ "/salt/manager/tools/sbin/* /usr/sbin/." + - name: "cp {{UPDATE_DIR}}/salt/manager/tools/sbin/* /usr/sbin/." 
From 0c6c6ba2d5d2e529ab6239f53ea6bd38d8a13446 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 13 Feb 2024 13:38:43 -0500 Subject: [PATCH 113/777] Various UI tweaks --- salt/soc/config.sls | 9 ++++++++- salt/soc/defaults.yaml | 6 +++--- salt/soc/enabled.sls | 2 +- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 549bf94cf..e4dad8df2 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -9,9 +9,16 @@ include: - manager.sync_es_users +socdirtest: + file.directory: + - name: /opt/so/rules/elastalert/rules + - user: 939 + - group: 939 + - makedirs: True + socdir: file.directory: - - name: /opt/so/conf/soc + - name: /opt/so/conf/soc/fingerprints - user: 939 - group: 939 - makedirs: True diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 29cd7e1ac..c060698b4 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1006,7 +1006,7 @@ soc: communityRulesImportFrequencySeconds: 180 elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint - sigmaRulePackages: all + sigmaRulePackages: core elastic: hostUrl: remoteHostUrls: [] @@ -1050,10 +1050,10 @@ soc: - rbac/users_roles strelkaengine: compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py - reposFolder: /nsm/rules/yara/repos + reposFolder: /opt/sensoroni/yara/repos rulesRepos: - https://github.com/Security-Onion-Solutions/securityonion-yara - yaraRulesFolder: /opt/sensoroni/yara + yaraRulesFolder: /opt/sensoroni/yara/rules suricataengine: communityRulesFile: /nsm/rules/suricata/emerging-all.rules rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 535423179..7c04da825 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -23,7 +23,7 @@ so-soc: - ipv4_address: {{ DOCKER.containers['so-soc'].ip }} - binds: - /nsm/rules:/nsm/rules:rw #Need to tighten this up? 
- - /opt/so/rules/yara:/opt/sensoroni/yara:rw + - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw From 7112337c85ba0b75ac56d46da2965c0657a2cd66 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 13:52:14 -0500 Subject: [PATCH 114/777] fix copy --- salt/manager/tools/sbin/soup | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 75be97928..b3df3eb6f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -794,13 +794,8 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - cp $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. - #cp $UPDATE_DIR/salt/common/tools/sbin/* /usr/sbin/. - cp $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. - #cp $UPDATE_DIR/salt/manager/tools/sbin/* /usr/sbin/. - - #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/soup - #rm -f $DEFAULT_SALT_DIR/salt/common/tools/sbin/so-firewall + \cp -v $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. + \cp -v $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. 
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local From 141fd49f02ec03a5639a6b2908c16d1ddbb81bd9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 14:27:22 -0500 Subject: [PATCH 115/777] use rsync --- salt/common/soup_scripts.sls | 16 ++++++++++++---- salt/manager/tools/sbin/soup | 2 -- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 86042a880..b4b3504d1 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -12,10 +12,18 @@ remove_common_so-firewall: {% set UPDATE_DIR='/tmp/sogh/securityonion'%} {% endif %} -copy_common: +copy_common_tools_sbin: cmd.run: - - name: "cp {{UPDATE_DIR}}/salt/common/tools/sbin/* /usr/sbin/." + - name: "rsync -avh {{UPDATE_DIR}}/salt/common/tools/sbin/* /opt/so/saltstack/default/salt/common/tools/sbin/" -copy_manager: +copy_manager_tools_sbin: cmd.run: - - name: "cp {{UPDATE_DIR}}/salt/manager/tools/sbin/* /usr/sbin/." + - name: "rsync -avh {{UPDATE_DIR}}/salt/manager/tools/sbin/* /opt/so/saltstack/default/salt/manager/tools/sbin/" + +copy_common_sbin: + cmd.run: + - name: "rsync -avh {{UPDATE_DIR}}/salt/common/tools/sbin/* /usr/sbin/" + +copy_manager_sbin: + cmd.run: + - name: "rsync -avh {{UPDATE_DIR}}/salt/manager/tools/sbin/* /usr/sbin/" diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index b3df3eb6f..c4eef3994 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -794,8 +794,6 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - \cp -v $UPDATE_DIR/salt/common/tools/sbin/* $DEFAULT_SALT_DIR/salt/common/tools/sbin/. - \cp -v $UPDATE_DIR/salt/manager/tools/sbin/* $DEFAULT_SALT_DIR/salt/manager/tools/sbin/. 
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local From 0d297274c8fc4f278e7b9e8530bdd39856425fb9 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Tue, 13 Feb 2024 12:53:18 -0700 Subject: [PATCH 116/777] DetectionComment Mapping Defined --- .../templates/component/so/detection-mappings.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json index 4efd2f73f..9b68421e7 100644 --- a/salt/elasticsearch/templates/component/so/detection-mappings.json +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -111,7 +111,7 @@ } } }, - "so_comment": { + "so_detectioncomment": { "properties": { "createTime": { "type": "date" @@ -120,7 +120,7 @@ "ignore_above": 1024, "type": "keyword" }, - "description": { + "value": { "type": "text" }, "userId": { From 88786e83427ae49f236aa9f25c28f80684ec6845 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 15:05:09 -0500 Subject: [PATCH 117/777] use file.copy to preserve perms --- salt/common/soup_scripts.sls | 64 +++++++++++++++++++++++++++++------- 1 file changed, 52 insertions(+), 12 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index b4b3504d1..809886266 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -12,18 +12,58 @@ remove_common_so-firewall: {% set UPDATE_DIR='/tmp/sogh/securityonion'%} {% endif %} -copy_common_tools_sbin: - cmd.run: - - name: "rsync -avh {{UPDATE_DIR}}/salt/common/tools/sbin/* /opt/so/saltstack/default/salt/common/tools/sbin/" +copy_so-common_common_tools_sbin: + file.copy: + - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common + - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common + - force: True + - preserve: True -copy_manager_tools_sbin: - cmd.run: - - name: "rsync -avh 
{{UPDATE_DIR}}/salt/manager/tools/sbin/* /opt/so/saltstack/default/salt/manager/tools/sbin/" +copy_so-image-common_common_tools_sbin: + file.copy: + - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common + - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common + - force: True + - preserve: True -copy_common_sbin: - cmd.run: - - name: "rsync -avh {{UPDATE_DIR}}/salt/common/tools/sbin/* /usr/sbin/" +copy_soup_manager_tools_sbin: + file.copy: + - name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup + - force: True + - preserve: True -copy_manager_sbin: - cmd.run: - - name: "rsync -avh {{UPDATE_DIR}}/salt/manager/tools/sbin/* /usr/sbin/" +copy_so-firewall_manager_tools_sbin: + file.copy: + - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall + - force: True + - preserve: True + +copy_so-common_sbin: + file.copy: + - name: /usr/sbin/so-common + - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common + - force: True + - preserve: True + +copy_so-image-common_sbin: + file.copy: + - name: /usr/sbin/so-image-common + - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common + - force: True + - preserve: True + +copy_soup_sbin: + file.copy: + - name: /usr/sbin/soup + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup + - force: True + - preserve: True + +copy_so-firewall_sbin: + file.copy: + - name: /usr/so-firewall + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall + - force: True + - preserve: True From 468eedfaeb7288d7ed3657da18856b297fe00f29 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 15:30:24 -0500 Subject: [PATCH 118/777] add soup script update retru --- salt/manager/tools/sbin/soup | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c4eef3994..0e982e51f 100755 --- 
a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -797,6 +797,14 @@ verify_latest_update_script() { salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + get_soup_script_hashes + if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then + echo "Succesfully updated soup scripts." + else + echo "There was a problem updating soup scripts. Trying to rerun script update." + salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + fi + echo "" echo "The soup script has been modified. Please run soup again to continue the upgrade." exit 0 From 00f2374582d915c02dab51e2710bd7fb6764caca Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 15:43:02 -0500 Subject: [PATCH 119/777] fix path for so-firewall --- salt/common/soup_scripts.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 809886266..c02f11172 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -63,7 +63,7 @@ copy_soup_sbin: copy_so-firewall_sbin: file.copy: - - name: /usr/so-firewall + - name: /usr/sbin/so-firewall - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall - force: True - preserve: True From 031ee078c528b614d398c6420cc5ab1f09bd45ac Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 19 Oct 2023 15:49:56 -0600 Subject: [PATCH 120/777] socsigmarepo Need write permissions on the /opt/so/rules dir so I can clone the sigma repo there. 
--- salt/soc/config.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 902d82ec7..95135566b 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -114,6 +114,13 @@ socuploaddir: - group: 939 - makedirs: True +socsigmarepo: + file.directory: + - name: /opt/so/rules + - user: 939 + - group: 939 + - mode: 775 + {% else %} {{sls}}_state_not_allowed: From 8800b7e8789297d72637a8a5016f14a1579c8db9 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Tue, 30 Jan 2024 15:43:51 -0700 Subject: [PATCH 121/777] WIP: Detections Changes Removed some strelka/yara rules from salt. Removed yara scripts for downloading and updating rules. This will be managed by SOC. Added a new compile_yara.py script. Added the strelka repos folder. --- salt/manager/init.sls | 53 +------------------ .../manager/tools/sbin_jinja/so-yara-download | 51 ------------------ salt/manager/tools/sbin_jinja/so-yara-update | 41 -------------- salt/soc/files/bin/compile_yara.py | 14 +++++ salt/strelka/backend/config.sls | 10 ---- salt/strelka/config.sls | 9 +++- 6 files changed, 24 insertions(+), 154 deletions(-) delete mode 100644 salt/manager/tools/sbin_jinja/so-yara-download delete mode 100755 salt/manager/tools/sbin_jinja/so-yara-update create mode 100644 salt/soc/files/bin/compile_yara.py diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 23ef189b5..51590a6ec 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -1,5 +1,5 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
@@ -61,7 +61,7 @@ manager_sbin: - user: 939 - group: 939 - file_mode: 755 - - exclude_pat: + - exclude_pat: - "*_test.py" yara_update_scripts: @@ -103,55 +103,6 @@ rules_dir: - group: socore - makedirs: True -{% if STRELKAMERGED.rules.enabled %} - -strelkarepos: - file.managed: - - name: /opt/so/conf/strelka/repos.txt - - source: salt://strelka/rules/repos.txt.jinja - - template: jinja - - defaults: - STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} - - makedirs: True - -strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' - - identifier: strelka-yara-update - - hour: '7' - - minute: '1' - -strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' - - identifier: strelka-yara-download - - hour: '7' - - minute: '1' - -{% if not GLOBALS.airgap %} -update_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-update - - onchanges: - - file: yara_update_scripts - -download_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-download - - onchanges: - - file: yara_update_scripts -{% endif %} -{% endif %} {% else %} {{sls}}_state_not_allowed: diff --git a/salt/manager/tools/sbin_jinja/so-yara-download b/salt/manager/tools/sbin_jinja/so-yara-download deleted file mode 100644 index aa9576253..000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-download +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -NOROOT=1 -. 
/usr/sbin/so-common - -{%- set proxy = salt['pillar.get']('manager:proxy') %} -{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %} - -# Download the rules from the internet -{%- if proxy %} -export http_proxy={{ proxy }} -export https_proxy={{ proxy }} -export no_proxy="{{ noproxy }}" -{%- endif %} - -repos="/opt/so/conf/strelka/repos.txt" -output_dir=/nsm/rules/yara -gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com) -clone_dir="/tmp" -if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then - - while IFS= read -r repo; do - if ! $(echo "$repo" | grep -qE '^#'); then - # Remove old repo if existing bc of previous error condition or unexpected disruption - repo_name=`echo $repo | awk -F '/' '{print $NF}'` - [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name - - # Clone repo and make appropriate directories for rules - git clone $repo $clone_dir/$repo_name - echo "Analyzing rules from $clone_dir/$repo_name..." - mkdir -p $output_dir/$repo_name - # Ensure a copy of the license is available for the rules - [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name - - # Copy over rules - for i in $(find $clone_dir/$repo_name -name "*.yar*"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - cp $i $output_dir/$repo_name - done - rm -rf $clone_dir/$repo_name - fi - done < $repos - - echo "Done!" - -/usr/sbin/so-yara-update - -else - echo "Server returned $gh_status status code." - echo "No connectivity to Github...exiting..." - exit 1 -fi diff --git a/salt/manager/tools/sbin_jinja/so-yara-update b/salt/manager/tools/sbin_jinja/so-yara-update deleted file mode 100755 index 07c940f47..000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-update +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -NOROOT=1 -. /usr/sbin/so-common - -echo "Starting to check for yara rule updates at $(date)..." - -newcounter=0 -excludedcounter=0 -excluded_rules=({{ EXCLUDEDRULES | join(' ') }}) - -# Pull down the SO Rules -SORULEDIR=/nsm/rules/yara -OUTPUTDIR=/opt/so/saltstack/local/salt/strelka/rules - -mkdir -p $OUTPUTDIR -# remove all rules prior to copy so we can clear out old rules -rm -f $OUTPUTDIR/* - -for i in $(find $SORULEDIR -name "*.yar" -o -name "*.yara"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - if [[ ! "${excluded_rules[*]}" =~ ${rule_name} ]]; then - echo "Adding rule: $rule_name..." - cp $i $OUTPUTDIR/$rule_name - ((newcounter++)) - else - echo "Excluding rule: $rule_name..." - ((excludedcounter++)) - fi -done - -if [ "$newcounter" -gt 0 ] || [ "$excludedcounter" -gt 0 ];then - echo "$newcounter rules added." - echo "$excludedcounter rule(s) excluded." -fi - -echo "Finished rule updates at $(date)..." 
diff --git a/salt/soc/files/bin/compile_yara.py b/salt/soc/files/bin/compile_yara.py new file mode 100644 index 000000000..43c8b1a09 --- /dev/null +++ b/salt/soc/files/bin/compile_yara.py @@ -0,0 +1,14 @@ +import os +import yara +import glob +import sys + +def compile_yara_rules(rules_dir: str) -> None: + compiled_rules_path: str = os.path.join(rules_dir, "rules.yar.compiled") + rule_files: list[str] = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) + + if rule_files: + rules: yara.Rules = yara.compile(filepaths={os.path.basename(f): f for f in rule_files}) + rules.save(compiled_rules_path) + +compile_yara_rules(sys.argv[1]) diff --git a/salt/strelka/backend/config.sls b/salt/strelka/backend/config.sls index d51debb1b..b39e06ac8 100644 --- a/salt/strelka/backend/config.sls +++ b/salt/strelka/backend/config.sls @@ -50,16 +50,6 @@ backend_taste: - user: 939 - group: 939 -{% if STRELKAMERGED.rules.enabled %} -strelkarules: - file.recurse: - - name: /opt/so/conf/strelka/rules - - source: salt://strelka/rules - - user: 939 - - group: 939 - - clean: True -{% endif %} - {% else %} {{sls}}_state_not_allowed: diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 1d0f75adf..929bef113 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -1,5 +1,5 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
@@ -21,6 +21,13 @@ strelkarulesdir: - group: 939 - makedirs: True +strelkareposdir: + file.directory: + - name: /opt/so/conf/strelka/repos + - user: 939 + - group: 939 + - makedirs: True + strelkadatadir: file.directory: - name: /nsm/strelka From f321e734ebabe5043a01ab79d320abc485d3ef48 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Wed, 31 Jan 2024 10:39:47 -0700 Subject: [PATCH 122/777] Added so-detection mapping in elasticsearch --- salt/elasticsearch/defaults.yaml | 31 ++++- .../component/so/detection-mappings.json | 108 ++++++++++++++++++ .../component/so/detection-settings.json | 7 ++ 3 files changed, 145 insertions(+), 1 deletion(-) create mode 100644 salt/elasticsearch/templates/component/so/detection-mappings.json create mode 100644 salt/elasticsearch/templates/component/so/detection-settings.json diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 03cd6d519..f4d8c8a95 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -198,6 +198,35 @@ elasticsearch: sort: field: '@timestamp' order: desc + so-detection: + index_sorting: false + index_template: + composed_of: + - detection-mappings + - detection-settings + index_patterns: + - so-detection* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + mapping: + total_fields: + limit: 1500 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc so-common: close: 30 delete: 365 @@ -8990,7 +9019,7 @@ elasticsearch: actions: set_priority: priority: 50 - min_age: 30d + min_age: 30d so-logs-ti_otx_x_threat: index_sorting: false index_template: diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json new file mode 100644 index 000000000..df53308f2 
--- /dev/null +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -0,0 +1,108 @@ +{ + "template": { + "mappings": { + "properties": { + "so_audit_doc_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "@timestamp": { + "type": "date" + }, + "so_kind": { + "ignore_above": 1024, + "type": "keyword" + }, + "so_operation": { + "ignore_above": 1024, + "type": "keyword" + }, + "so_detection": { + "properties": { + "publicId": { + "type": "text" + }, + "title": { + "type": "text" + }, + "severity": { + "ignore_above": 1024, + "type": "keyword" + }, + "author": { + "type": "text" + }, + "description": { + "type": "text" + }, + "content": { + "type": "text" + }, + "isEnabled": { + "type": "boolean" + }, + "isReporting": { + "type": "boolean" + }, + "isCommunity": { + "type": "boolean" + }, + "note": { + "type": "text" + }, + "engine": { + "ignore_above": 1024, + "type": "keyword" + }, + "overrides": { + "properties": { + "type": { + "ignore_above": 1024, + "type": "keyword" + }, + "isEnabled": { + "type": "boolean" + }, + "createdAt": { + "type": "date" + }, + "updatedAt": { + "type": "date" + }, + "regex": { + "type": "text" + }, + "value": { + "type": "text" + }, + "thresholdType": { + "ignore_above": 1024, + "type": "keyword" + }, + "track": { + "ignore_above": 1024, + "type": "keyword" + }, + "ip": { + "type": "text" + }, + "count": { + "type": "long" + }, + "seconds": { + "type": "long" + }, + "customFilter": { + "type": "text" + } + } + } + } + } + } + } + }, + "_meta": { + "ecs_version": "1.12.2" + } +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/component/so/detection-settings.json b/salt/elasticsearch/templates/component/so/detection-settings.json new file mode 100644 index 000000000..7b0947a4c --- /dev/null +++ b/salt/elasticsearch/templates/component/so/detection-settings.json @@ -0,0 +1,7 @@ +{ + "template": {}, + "version": 1, + "_meta": { + "description": "default settings for common Security Onion 
Detections indices" + } +} \ No newline at end of file From 2e9fa2438b01eb65f16b942ea7fdcb6009486a9f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 13 Feb 2024 16:19:50 -0500 Subject: [PATCH 123/777] add back comment --- salt/manager/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0e982e51f..8b9d4a6b3 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -797,6 +797,7 @@ verify_latest_update_script() { salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + # Verify that soup scripts updated as expected get_soup_script_hashes if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then echo "Succesfully updated soup scripts." From 79e98e508f0d6f0ebd100e4d38bf767151cc82a2 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 14 Feb 2024 13:28:12 -0500 Subject: [PATCH 124/777] pass in UPDATE_DIR as a pillar --- salt/common/soup_scripts.sls | 7 ++----- salt/manager/tools/sbin/soup | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index c02f11172..9c3b8ad3a 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -6,11 +6,8 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall -{% if salt['pillar.get']('global:airgap') %} -{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion'%} -{% else %} -{% set UPDATE_DIR='/tmp/sogh/securityonion'%} -{% endif %} +{# this pillar isn't defined anywhere. 
it is passed in from soup when the state is called #} +{% set UPDATE_DIR= salt['pillar.get']('UPDATE_DIR') %} copy_so-common_common_tools_sbin: file.copy: diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 8b9d4a6b3..c44883d15 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -803,7 +803,7 @@ verify_latest_update_script() { echo "Succesfully updated soup scripts." else echo "There was a problem updating soup scripts. Trying to rerun script update." - salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local pillar="{'UPDATE_DIR': $UPDATE_DIR}" fi echo "" From c1f467a06819c22e3bec737c7270b11c31bc6d9f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 14 Feb 2024 14:22:18 -0500 Subject: [PATCH 125/777] handle airgap --- salt/common/soup_scripts.sls | 8 ++++++-- salt/manager/tools/sbin/soup | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 9c3b8ad3a..5a7bdef47 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -6,8 +6,12 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall -{# this pillar isn't defined anywhere. 
it is passed in from soup when the state is called #} -{% set UPDATE_DIR= salt['pillar.get']('UPDATE_DIR') %} +{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} +{% if SOC_GLOBAL.global.airgap %} +{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} +{% else %} +{% set UPDATE_DIR='/tmp/sogh/securityonion' %} +{% endif %} copy_so-common_common_tools_sbin: file.copy: diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c44883d15..8b9d4a6b3 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -803,7 +803,7 @@ verify_latest_update_script() { echo "Succesfully updated soup scripts." else echo "There was a problem updating soup scripts. Trying to rerun script update." - salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local pillar="{'UPDATE_DIR': $UPDATE_DIR}" + salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local fi echo "" From a2b17d23485804bd4e4895f3e3e5f70d9a83d8e6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 14 Feb 2024 14:27:41 -0500 Subject: [PATCH 126/777] move jinja to top --- salt/common/soup_scripts.sls | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 5a7bdef47..fd32b8a28 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -1,3 +1,10 @@ +{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} +{% if SOC_GLOBAL.global.airgap %} +{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} +{% else %} +{% set UPDATE_DIR='/tmp/sogh/securityonion' %} +{% endif %} + remove_common_soup: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/soup @@ -6,13 +13,6 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall -{% import_yaml 
'/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} -{% if SOC_GLOBAL.global.airgap %} -{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} -{% else %} -{% set UPDATE_DIR='/tmp/sogh/securityonion' %} -{% endif %} - copy_so-common_common_tools_sbin: file.copy: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common From c64f37ab671688e46d985806f7267f6c87ce48b0 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 15 Feb 2024 10:34:07 -0700 Subject: [PATCH 127/777] sigmaRulePackages is now a string array --- salt/soc/defaults.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index a19fb45f0..546114b9f 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -64,7 +64,7 @@ soc: icon: fa-external-link-alt target: _blank links: - - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' + - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' - name: actionProcessAncestors description: actionProcessAncestorsHelp icon: fa-people-roof @@ -1012,7 +1012,8 @@ soc: communityRulesImportFrequencySeconds: 180 elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint - sigmaRulePackages: core + sigmaRulePackages: + - core elastic: hostUrl: remoteHostUrls: [] From ffb3cc87b7adf47452ced95ae0d8aff370a1ef12 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 16 Feb 2024 11:55:10 -0500 Subject: [PATCH 128/777] Default ruleset; Descriptions --- salt/idstools/soc_idstools.yaml | 2 +- salt/soc/defaults.yaml | 1 + salt/soc/soc_soc.yaml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index 7cf11dba4..f8ec3b8b6 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -8,7 +8,7 @@ idstools: global: True helpLink: rules.html ruleset: - description: Defines the ruleset you want to run. 
Options are ETOPEN or ETPRO. -- WARNING -- Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides.' + description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True regex: ETPRO\b|ETOPEN\b helpLink: rules.html diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 546114b9f..91d47cf91 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1014,6 +1014,7 @@ soc: rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint sigmaRulePackages: - core + - emerging_threats_addon elastic: hostUrl: remoteHostUrls: [] diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index fe0458820..fdfb09733 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -72,7 +72,7 @@ soc: modules: elastalertengine: sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run: core | core+ | core++ | all. -- WARNING -- Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides.' + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' 
global: True advanced: False elastic: From 18b4fcca757fc0c41ef32f1c077198a358f0a9c4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 20 Feb 2024 09:47:05 -0500 Subject: [PATCH 129/777] 2.4.50 --- DOWNLOAD_AND_VERIFY_ISO.md | 22 ++++++++++----------- sigs/securityonion-2.4.50-20240220.iso.sig | Bin 0 -> 566 bytes 2 files changed, 11 insertions(+), 11 deletions(-) create mode 100644 sigs/securityonion-2.4.50-20240220.iso.sig diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index 16641e4cb..a23d88d4d 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,17 +1,17 @@ -### 2.4.40-20240116 ISO image released on 2024/01/17 +### 2.4.50-20240220 ISO image released on 2024/02/20 ### Download and Verify -2.4.40-20240116 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.4.40-20240116.iso +2.4.50-20240220 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.50-20240220.iso -MD5: AC55D027B663F3CE0878FEBDAD9DD78B -SHA1: C2B51723B17F3DC843CC493EB80E93B123E3A3E1 -SHA256: C5F135FCF45A836BBFF58C231F95E1EA0CD894898322187AD5FBFCD24BC2F123 +MD5: BCA6476EF1BF79773D8EFB11700FDE8E +SHA1: 9FF0A304AA368BCD2EF2BE89AD47E65650241927 +SHA256: 49D7695EFFF6F3C4840079BF564F3191B585639816ADE98672A38017F25E9570 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.40-20240116.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.50-20240220.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. 
Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.40-20240116.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.50-20240220.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.40-20240116.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.50-20240220.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.40-20240116.iso.sig securityonion-2.4.40-20240116.iso +gpg --verify securityonion-2.4.50-20240220.iso.sig securityonion-2.4.50-20240220.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Tue 16 Jan 2024 07:34:40 PM EST using RSA key ID FE507013 +gpg: Signature made Fri 16 Feb 2024 11:36:25 AM EST using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. diff --git a/sigs/securityonion-2.4.50-20240220.iso.sig b/sigs/securityonion-2.4.50-20240220.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..bb9eac3235fc167faf84cac11d3ade5db5f32b3b GIT binary patch literal 566 zcmV-60?GY}0y6{v0SEvc79j-41gSkXz6^6dp_W8^5Ma0dP;e6k0%gyS2>=QS5PT3| zxBgIY6F7Ab|9^^Iy!d&RN}tRszHX8*RCw0VN{2)C;pNY9Xy6yl9M0|3s5yqxZBL_u zh{EOVK-a6>W^Fid3&iNcidnRyYX~RVDwYEk7f{AWBIFOR!1)AC)q1$abfgu+eF+8h z9(Pe-(1kJ5+!BA+Wxr_Sz+NG@jjTPo-(NZS`RFA|7zrMMM*nguj@XoT%Ad%%Loh8#Bp67#Y@UBaIf;2)7BmGospXL5=F2WKsyc71HR$Jl? 
zK=0DrYinoNasQMKB(}_x{e+wVTE=D-J>jhE?ES^d=RPOiE{3AX>C~3_TeHEaLGX@@ z#C&)jr3=jP&JU;Bzw?HyM$T9<_~8XuFqXV&s?ti)KV8(Rpo2M017H1Iyr!x4JZ2H# zsi?)VVr20pCr{F398j7|rB@$@qn+G?cp6B)P4{q6;)1e3qjuNn?Jr{KPDp0Ht&O%e zlE*c!Am4nOAX~exc;{Qm+z_T8o_?+yEAETcx`j$ZJ_8~DYPYhxJ^?JL+99~3o|Lt( zhPjNeaV0ZQVDX8C?T}m4 z2Q`fKzUYs>rGkYNdZzk|3VACg@$l~b1b;QiG+Z5;uX8ff1b> Date: Tue, 20 Feb 2024 10:14:11 -0500 Subject: [PATCH 130/777] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 9cf89c6c7..5a99ed019 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.50 +2.4.60 From 4b314c871573144b89bf47fb45eebfc10bb3ba56 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 20 Feb 2024 10:30:09 -0500 Subject: [PATCH 131/777] replace correlate icon to avoid confusion with searcheng.in --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 244a021d3..dc836a9d7 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -20,7 +20,7 @@ soc: - dashboards - name: actionCorrelate description: actionCorrelateHelp - icon: fab fa-searchengin + icon: fa-magnifying-glass-arrow-right target: '' links: - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' From 6c6a362fcc14ae2c7df4c71ea4d2299072779d8a 
Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 20 Feb 2024 19:14:18 -0500 Subject: [PATCH 132/777] add lock threads --- .github/workflows/lock-threads.yml | 42 ++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 .github/workflows/lock-threads.yml diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml new file mode 100644 index 000000000..25e5d8c17 --- /dev/null +++ b/.github/workflows/lock-threads.yml @@ -0,0 +1,42 @@ +name: 'Lock Threads' + +on: + schedule: + - cron: '50 1 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + discussions: write + +concurrency: + group: lock-threads + +jobs: + close-threads: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v5 + with: + days-before-issue-stale: -1 + days-before-issue-close: 60 + stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution." + close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available." + days-before-pr-stale: 45 + days-before-pr-close: 60 + stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes." + close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked." 
+ + lock-threads: + runs-on: ubuntu-latest + steps: + - uses: jertel/lock-threads@main + with: + include-discussion-currently-open: true + discussion-inactive-days: 90 + issue-inactive-days: 30 + pr-inactive-days: 30 From 9ca0f586ae63a90fef9c78b8b1fc24b020545ae3 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 21 Feb 2024 11:45:02 -0500 Subject: [PATCH 133/777] Manage the repos --- salt/manager/files/mirror.txt | 0 salt/manager/files/repodownload.conf | 13 +++++++++++++ salt/manager/init.sls | 14 ++++++++++++++ setup/so-functions | 4 ++-- 4 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 salt/manager/files/mirror.txt create mode 100644 salt/manager/files/repodownload.conf diff --git a/salt/manager/files/mirror.txt b/salt/manager/files/mirror.txt new file mode 100644 index 000000000..e69de29bb diff --git a/salt/manager/files/repodownload.conf b/salt/manager/files/repodownload.conf new file mode 100644 index 000000000..3c156a9db --- /dev/null +++ b/salt/manager/files/repodownload.conf @@ -0,0 +1,13 @@ +[main] +gpgcheck=1 +installonly_limit=3 +clean_requirements_on_remove=True +best=True +skip_if_unavailable=False +cachedir=/opt/so/conf/reposync/cache +keepcache=0 +[securityonionsync] +name=Security Onion Repo repo +mirrorlist=file:///opt/so/conf/reposync/mirror.txt +enabled=1 +gpgcheck=1 \ No newline at end of file diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 23ef189b5..e51a448d5 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -75,6 +75,20 @@ yara_update_scripts: - defaults: EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} +so-repo-file: + file.managed: + - name: /opt/so/conf/reposync/repodownload.conf + - source: salt://manager/files/repodownload.conf + - user: socore + - group: socore + +so-repo-mirrorlist: + file.managed: + - name: /opt/so/conf/reposync/mirror.txt + - source: salt://manager/files/mirror.txt + - user: socore + - group: socore + so-repo-sync: {% if MANAGERMERGED.reposync.enabled %} 
cron.present: diff --git a/setup/so-functions b/setup/so-functions index f0462e4d6..ef1df4a71 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1811,8 +1811,8 @@ repo_sync_local() { info "Adding Repo Download Configuration" mkdir -p /nsm/repo mkdir -p /opt/so/conf/reposync/cache - echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9" > /opt/so/conf/reposync/mirror.txt - echo "https://so-repo-east.s3.us-east-005.backblazeb2.com/prod/2.4/oracle/9" >> /opt/so/conf/reposync/mirror.txt + echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9.3" > /opt/so/conf/reposync/mirror.txt + echo "https://repo-alt.securityonion.net/prod/2.4/oracle/9.3" >> /opt/so/conf/reposync/mirror.txt echo "[main]" > /opt/so/conf/reposync/repodownload.conf echo "gpgcheck=1" >> /opt/so/conf/reposync/repodownload.conf echo "installonly_limit=3" >> /opt/so/conf/reposync/repodownload.conf From 25570e6ec2e9935c4a4481fd5f0ad32b9b7068de Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 21 Feb 2024 13:18:39 -0500 Subject: [PATCH 134/777] add missing template --- .github/DISCUSSION_TEMPLATE/2-4.yml | 190 ++++++++++++++++++++++++++++ 1 file changed, 190 insertions(+) create mode 100644 .github/DISCUSSION_TEMPLATE/2-4.yml diff --git a/.github/DISCUSSION_TEMPLATE/2-4.yml b/.github/DISCUSSION_TEMPLATE/2-4.yml new file mode 100644 index 000000000..8e2592071 --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/2-4.yml @@ -0,0 +1,190 @@ +body: + - type: markdown + attributes: + value: | + ⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️ + + If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support + - type: dropdown + attributes: + label: Version + description: Which version of Security Onion 2.4.x are you asking about? 
+ options: + - + - 2.4 Pre-release (Beta, Release Candidate) + - 2.4.10 + - 2.4.20 + - 2.4.30 + - 2.4.40 + - 2.4.50 + - 2.4.60 + - 2.4.70 + - 2.4.80 + - 2.4.90 + - 2.4.100 + - Other (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Installation Method + description: How did you install Security Onion? + options: + - + - Security Onion ISO image + - Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc. + - Network installation on Ubuntu + - Network installation on Debian + - Other (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Description + description: > + Is this discussion about installation, configuration, upgrading, or other? + options: + - + - installation + - configuration + - upgrading + - other (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Installation Type + description: > + When you installed, did you choose Import, Eval, Standalone, Distributed, or something else? + options: + - + - Import + - Eval + - Standalone + - Distributed + - other (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Location + description: > + Is this deployment in the cloud, on-prem with Internet access, or airgap? + options: + - + - cloud + - on-prem with Internet access + - airgap + - other (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Hardware Specs + description: > + Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://docs.securityonion.net/en/2.4/hardware.html? 
+ options: + - + - Meets minimum requirements + - Exceeds minimum requirements + - Does not meet minimum requirements + - other (please provide detail below) + validations: + required: true + - type: input + attributes: + label: CPU + description: How many CPU cores do you have? + validations: + required: true + - type: input + attributes: + label: RAM + description: How much RAM do you have? + validations: + required: true + - type: input + attributes: + label: Storage for / + description: How much storage do you have for the / partition? + validations: + required: true + - type: input + attributes: + label: Storage for /nsm + description: How much storage do you have for the /nsm partition? + validations: + required: true + - type: dropdown + attributes: + label: Network Traffic Collection + description: > + Are you collecting network traffic from a tap or span port? + options: + - + - tap + - span port + - other (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Network Traffic Speeds + description: > + How much network traffic are you monitoring? + options: + - + - Less than 1Gbps + - 1Gbps to 10Gbps + - more than 10Gbps + validations: + required: true + - type: dropdown + attributes: + label: Status + description: > + Does SOC Grid show all services on all nodes as running OK? + options: + - + - Yes, all services on all nodes are running OK + - No, one or more services are failed (please provide detail below) + validations: + required: true + - type: dropdown + attributes: + label: Salt Status + description: > + Do you get any failures when you run "sudo salt-call state.highstate"? + options: + - + - Yes, there are salt failures (please provide detail below) + - No, there are no failures + validations: + required: true + - type: dropdown + attributes: + label: Logs + description: > + Are there any additional clues in /opt/so/log/? 
+ options: + - + - Yes, there are additional clues in /opt/so/log/ (please provide detail below) + - No, there are no additional clues + validations: + required: true + - type: textarea + attributes: + label: Detail + description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you. + placeholder: |- + STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety! + + If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support + validations: + required: true + - type: checkboxes + attributes: + label: Guidelines + options: + - label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines. + required: true From 162785575cbae9d9454a1d58830ab740129f1c57 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 21 Feb 2024 15:28:24 -0500 Subject: [PATCH 135/777] nest under policy --- salt/elasticsearch/soc_elasticsearch.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 9a64190b3..e68d0441b 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -95,6 +95,7 @@ elasticsearch: description: The order to sort by. Must set index_sorting to True. 
global: True helpLink: elasticsearch.html + policy: phases: hot: max_age: From 927ea0c9ecf01e4f8b08b6e2688abbefac8cf1d2 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 21 Feb 2024 15:56:12 -0500 Subject: [PATCH 136/777] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 7f2e97617..5a99ed019 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-kilo +2.4.60 From 0a9022ba6a1f6ce82efa28d632b2f4f4bb1cd7b6 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 21 Feb 2024 17:07:08 -0500 Subject: [PATCH 137/777] Add hash mappings --- salt/soc/files/soc/sigma_so_pipeline.yaml | 32 ++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index a1c4d6d62..54ce83eff 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -15,4 +15,34 @@ transformations: src_ip: destination.ip.keyword src_port: source.port dst_ip: destination.ip.keyword - dst_port: destination.port \ No newline at end of file + dst_port: destination.port + - id: hashes_process-creation + type: field_name_mapping + mapping: + winlog.event_data.sha256: process.hash.sha256 + winlog.event_data.sha1: process.hash.sha1 + winlog.event_data.md5: process.hash.md5 + rule_conditions: + - type: logsource + product: windows + category: process_creation + - id: hashes_image-load + type: field_name_mapping + mapping: + winlog.event_data.sha256: dll.hash.sha256 + winlog.event_data.sha1: dll.hash.sha1 + winlog.event_data.md5: dll.hash.md5 + rule_conditions: + - type: logsource + product: windows + category: image_load + - id: hashes_driver-load + type: field_name_mapping + mapping: + winlog.event_data.sha256: dll.hash.sha256 + winlog.event_data.sha1: dll.hash.sha1 + winlog.event_data.md5: dll.hash.md5 + rule_conditions: + - type: logsource + product: windows + category: driver_load \ No newline 
at end of file From c886e7279363a8f0c614dc8a753166f5d418d5dc Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 22 Feb 2024 08:59:33 -0500 Subject: [PATCH 138/777] Imphash mappings --- salt/soc/files/soc/sigma_so_pipeline.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index 54ce83eff..8121a6f13 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -15,13 +15,15 @@ transformations: src_ip: destination.ip.keyword src_port: source.port dst_ip: destination.ip.keyword - dst_port: destination.port + dst_port: destination.port + winlog.event_data.User: user.name - id: hashes_process-creation type: field_name_mapping mapping: winlog.event_data.sha256: process.hash.sha256 winlog.event_data.sha1: process.hash.sha1 winlog.event_data.md5: process.hash.md5 + winlog.event_data.Imphash: process.pe.imphash rule_conditions: - type: logsource product: windows @@ -32,6 +34,7 @@ transformations: winlog.event_data.sha256: dll.hash.sha256 winlog.event_data.sha1: dll.hash.sha1 winlog.event_data.md5: dll.hash.md5 + winlog.event_data.Imphash: dll.pe.imphash rule_conditions: - type: logsource product: windows @@ -42,6 +45,7 @@ transformations: winlog.event_data.sha256: dll.hash.sha256 winlog.event_data.sha1: dll.hash.sha1 winlog.event_data.md5: dll.hash.md5 + winlog.event_data.Imphash: dll.pe.imphash rule_conditions: - type: logsource product: windows From 759b2ff59e5ffa201464a32a0ac776925a8044bb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 22 Feb 2024 10:03:51 -0500 Subject: [PATCH 139/777] Manage the repos --- salt/manager/files/mirror.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/files/mirror.txt b/salt/manager/files/mirror.txt index e69de29bb..732c116b4 100644 --- a/salt/manager/files/mirror.txt +++ b/salt/manager/files/mirror.txt @@ -0,0 +1,2 @@ 
+https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9 +https://repo-alt.securityonion.net/prod/2.4/oracle/9 \ No newline at end of file From e7914fc5a16bc47d243835244985088e717d9a80 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 22 Feb 2024 12:49:06 -0500 Subject: [PATCH 140/777] Update stenoloss.sh --- salt/telegraf/scripts/stenoloss.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/telegraf/scripts/stenoloss.sh b/salt/telegraf/scripts/stenoloss.sh index 5c27ee7a5..5219dcfd0 100644 --- a/salt/telegraf/scripts/stenoloss.sh +++ b/salt/telegraf/scripts/stenoloss.sh @@ -10,8 +10,8 @@ # if this script isn't already running if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - CHECKIT=$(grep "Thread 0" /var/log/stenographer/stenographer.log |tac |head -2|wc -l) - STENOGREP=$(grep "Thread 0" /var/log/stenographer/stenographer.log |tac |head -2) + CHECKIT=$(grep "Thread 0 stats" /var/log/stenographer/stenographer.log |tac |head -2|wc -l) + STENOGREP=$(grep "Thread 0 stats" /var/log/stenographer/stenographer.log |tac |head -2) declare RESULT=($STENOGREP) From d04aa06455e7e96b5cbdac6c09953e01bd718c81 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 22 Feb 2024 14:01:02 -0500 Subject: [PATCH 141/777] Fix source.ip --- salt/soc/files/soc/sigma_so_pipeline.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index 8121a6f13..533823e6f 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -12,7 +12,7 @@ transformations: sid: rule.uuid answer: answers query: dns.query.name - src_ip: destination.ip.keyword + src_ip: source.ip.keyword src_port: source.port dst_ip: destination.ip.keyword dst_port: destination.port From b8baca417bc6a50a5149b1f538dba6e9eb26ffb4 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 23 Feb 2024 14:03:04 -0500 Subject: [PATCH 142/777] add 
endpoint_x_events_x_process to defaults.yaml --- salt/soc/defaults.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 2c15fe996..d672d1dad 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -995,6 +995,14 @@ soc: - tds.header_type - log.id.uid - event.dataset + ':endpoint:endpoint_x_events_x_process': + - soc_timestamp + - event.dataset + - host.name + - user.name + - process.parent.name + - process.name + - process.working_directory server: bindAddress: 0.0.0.0:9822 baseUrl: / From 573d565976b267b304ce6c03b5ca8a9041b4ad70 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 23 Feb 2024 15:03:44 -0500 Subject: [PATCH 143/777] convert _x_ to . for soc ui to config --- salt/soc/merged.map.jinja | 2 ++ salt/soc/soc_soc.yaml | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 33c0070ad..55d8d2600 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -66,6 +66,8 @@ {% do SOCMERGED.config.server.client.alerts.update({'actions': standard_actions}) %} {% do SOCMERGED.config.server.client.cases.update({'actions': standard_actions}) %} +{# replace the _x_ with . 
for soc ui to config conversion #} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.process': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_process') }) %} {% set standard_eventFields = SOCMERGED.config.pop('eventFields') %} {% do SOCMERGED.config.server.client.hunt.update({'eventFields': standard_eventFields}) %} {% do SOCMERGED.config.server.client.dashboards.update({'eventFields': standard_eventFields}) %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index fdfb09733..13e50a0cb 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -55,10 +55,11 @@ soc: global: True forcedType: "[]{}" eventFields: - default: - description: Event fields mappings are defined by the format ":event.module:event.dataset". For example, to customize which fields show for 'syslog' events originating from 'zeek', find the eventField item in the left panel that looks like ':zeek:syslog'. This 'default' entry is used for all events that do not match an existing mapping defined in the list to the left. + default: &eventFields + description: Event fields mappings are defined by the format ":event.module:event.dataset". For example, to customize which fields show for 'syslog' events originating from 'zeek', find the eventField item in the left panel that looks like ':zeek:syslog'. The 'default' entry is used for all events that do not match an existing mapping defined in the list to the left. global: True advanced: True + ':endpoint:endpoint_x_events_x_process': *eventFields server: srvKey: description: Unique key for protecting the integrity of user submitted data via the web browser. 
From 7da0ccf5a628d71e75eef7fda547004ca693fec4 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 23 Feb 2024 15:35:53 -0500 Subject: [PATCH 144/777] add more endpoint.events.x entries to merged.map.jinja --- salt/soc/merged.map.jinja | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 55d8d2600..c0ea836e5 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -67,7 +67,13 @@ {% do SOCMERGED.config.server.client.cases.update({'actions': standard_actions}) %} {# replace the _x_ with . for soc ui to config conversion #} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.api': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_api') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.file': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_file') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.library': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_library') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.network': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_network') }) %} {% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.process': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_process') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.registry': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_registry') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.security': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_security') }) %} {% set standard_eventFields = SOCMERGED.config.pop('eventFields') %} {% do SOCMERGED.config.server.client.hunt.update({'eventFields': standard_eventFields}) %} {% do SOCMERGED.config.server.client.dashboards.update({'eventFields': standard_eventFields}) %} From 
b7ef1e8af121862174fd72b0a75125fbf1341ec1 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 23 Feb 2024 15:38:53 -0500 Subject: [PATCH 145/777] add more endpoint.events.x fields to soc_soc.yaml --- salt/soc/soc_soc.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 13e50a0cb..f59d6117b 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -59,7 +59,13 @@ soc: description: Event fields mappings are defined by the format ":event.module:event.dataset". For example, to customize which fields show for 'syslog' events originating from 'zeek', find the eventField item in the left panel that looks like ':zeek:syslog'. The 'default' entry is used for all events that do not match an existing mapping defined in the list to the left. global: True advanced: True + ':endpoint:endpoint_x_events_x_api': *eventFields + ':endpoint:endpoint_x_events_x_file': *eventFields + ':endpoint:endpoint_x_events_x_library': *eventFields + ':endpoint:endpoint_x_events_x_network': *eventFields ':endpoint:endpoint_x_events_x_process': *eventFields + ':endpoint:endpoint_x_events_x_registry': *eventFields + ':endpoint:endpoint_x_events_x_security': *eventFields server: srvKey: description: Unique key for protecting the integrity of user submitted data via the web browser. From 58f4fb87d08813ab41eea5b24e6b2a4a2326d5ff Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 23 Feb 2024 17:06:29 -0500 Subject: [PATCH 146/777] fix new eventFields in soc_soc.yaml --- salt/soc/soc_soc.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index f59d6117b..a9d36c70c 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -59,13 +59,13 @@ soc: description: Event fields mappings are defined by the format ":event.module:event.dataset". 
For example, to customize which fields show for 'syslog' events originating from 'zeek', find the eventField item in the left panel that looks like ':zeek:syslog'. The 'default' entry is used for all events that do not match an existing mapping defined in the list to the left. global: True advanced: True - ':endpoint:endpoint_x_events_x_api': *eventFields - ':endpoint:endpoint_x_events_x_file': *eventFields - ':endpoint:endpoint_x_events_x_library': *eventFields - ':endpoint:endpoint_x_events_x_network': *eventFields - ':endpoint:endpoint_x_events_x_process': *eventFields - ':endpoint:endpoint_x_events_x_registry': *eventFields - ':endpoint:endpoint_x_events_x_security': *eventFields + ':endpoint:events_x_api': *eventFields + ':endpoint:events_x_file': *eventFields + ':endpoint:events_x_library': *eventFields + ':endpoint:events_x_network': *eventFields + ':endpoint:events_x_process': *eventFields + ':endpoint:events_x_registry': *eventFields + ':endpoint:events_x_security': *eventFields server: srvKey: description: Unique key for protecting the integrity of user submitted data via the web browser. From daf96d79342959155d2fccb7313c8e5a9b393f60 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 23 Feb 2024 17:07:48 -0500 Subject: [PATCH 147/777] fix new eventFields in merged.map.jinja --- salt/soc/merged.map.jinja | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index c0ea836e5..65091158e 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -67,13 +67,13 @@ {% do SOCMERGED.config.server.client.cases.update({'actions': standard_actions}) %} {# replace the _x_ with . 
for soc ui to config conversion #} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.api': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_api') }) %} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.file': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_file') }) %} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.library': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_library') }) %} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.network': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_network') }) %} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.process': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_process') }) %} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.registry': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_registry') }) %} -{% do SOCMERGED.config.eventFields.update({':endpoint:endpoint.events.security': SOCMERGED.config.eventFields.pop(':endpoint:endpoint_x_events_x_security') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:events.api': SOCMERGED.config.eventFields.pop(':endpoint:events_x_api') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:events.file': SOCMERGED.config.eventFields.pop(':endpoint:events_x_file') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:events.library': SOCMERGED.config.eventFields.pop(':endpoint:events_x_library') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:events.network': SOCMERGED.config.eventFields.pop(':endpoint:events_x_network') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:events.process': SOCMERGED.config.eventFields.pop(':endpoint:events_x_process') }) %} +{% do SOCMERGED.config.eventFields.update({':endpoint:events.registry': SOCMERGED.config.eventFields.pop(':endpoint:events_x_registry') }) %} +{% do 
SOCMERGED.config.eventFields.update({':endpoint:events.security': SOCMERGED.config.eventFields.pop(':endpoint:events_x_security') }) %} {% set standard_eventFields = SOCMERGED.config.pop('eventFields') %} {% do SOCMERGED.config.server.client.hunt.update({'eventFields': standard_eventFields}) %} {% do SOCMERGED.config.server.client.dashboards.update({'eventFields': standard_eventFields}) %} From d6cb8ab92823b95d0d35cee157549b52a91d2c97 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 23 Feb 2024 17:09:40 -0500 Subject: [PATCH 148/777] update events_x_process in defaults.yaml --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index d672d1dad..7204027fc 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -995,7 +995,7 @@ soc: - tds.header_type - log.id.uid - event.dataset - ':endpoint:endpoint_x_events_x_process': + ':endpoint:events_x_process': - soc_timestamp - event.dataset - host.name From a6bb7216f9aaead1b51887cee531e8b74e6272c2 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 26 Feb 2024 08:18:42 -0500 Subject: [PATCH 149/777] Add Detection AutoUpdate config --- salt/soc/defaults.yaml | 2 ++ salt/soc/soc_soc.yaml | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 2c15fe996..5267955b9 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1009,6 +1009,7 @@ soc: kratos: hostUrl: elastalertengine: + autoUpdateEnabled: false communityRulesImportFrequencySeconds: 180 elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint @@ -1057,6 +1058,7 @@ soc: userFiles: - rbac/users_roles strelkaengine: + autoUpdateEnabled: false compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py reposFolder: /opt/sensoroni/yara/repos rulesRepos: diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index fdfb09733..74ae1051b 
100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -75,6 +75,10 @@ soc: description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True advanced: False + autoUpdateEnabled: + description: 'Set to true to enable automatic updates of the Sigma Community Ruleset.' + global: True + advanced: True elastic: index: description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records. @@ -133,6 +137,11 @@ soc: description: Duration (in milliseconds) to wait for a response from the Salt API when executing common grid management tasks before giving up and showing an error on the SOC UI. global: True advanced: True + strelkaengine: + autoUpdateEnabled: + description: 'Set to true to enable automatic updates of the Yara ruleset.' + global: True + advanced: True client: enableReverseLookup: description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. 
From ca249312baddc9c3d6b7321f104ccc58da447acb Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 26 Feb 2024 09:38:14 -0500 Subject: [PATCH 150/777] FEATURE: Add new SOC action for Process Info #12421 --- salt/soc/defaults.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 7204027fc..20f9c284f 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -65,12 +65,18 @@ soc: target: _blank links: - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' + - name: actionProcessInfo + description: actionProcessInfoHelp + icon: fa-person-running + target: '' + links: + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' - name: actionProcessAncestors description: actionProcessAncestorsHelp icon: fa-people-roof target: '' links: - - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.module event.dataset | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory' + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby 
dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' eventFields: default: - soc_timestamp From 4df21148fc4b7117b6a69b2914d961855d3f12e7 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 26 Feb 2024 09:40:51 -0500 Subject: [PATCH 151/777] FEATURE: Add default columns for endpoint.events datasets #12425 --- salt/soc/defaults.yaml | 57 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 20f9c284f..b5fe0e626 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1001,14 +1001,69 @@ soc: - tds.header_type - log.id.uid - event.dataset + ':endpoint:events_x_api': + - soc_timestamp + - host.name + - user.name + - process.name + - process.Ext.api.name + - process.thread.Ext.call_stack_final_user_module.path + - event.dataset + ':endpoint:events_x_file': + - soc_timestamp + - host.name + - user.name + - process.name + - event.action + - file.path + - event.dataset + ':endpoint:events_x_library': + - soc_timestamp + - host.name + - user.name + - process.name + - event.action + - dll.path + - dll.code_signature.status + - dll.code_signature.subject_name + - event.dataset + ':endpoint:events_x_network': + - soc_timestamp + - host.name + - user.name + - process.name + - event.action + - source.ip + - source.port + - destination.ip + - destination.port + - network.community_id + - event.dataset ':endpoint:events_x_process': - soc_timestamp - - event.dataset - host.name - user.name - process.parent.name - process.name + - event.action - process.working_directory + - event.dataset + ':endpoint:events_x_registry': + - soc_timestamp + - host.name + - user.name + - process.name + - event.action + - registry.path + - event.dataset + ':endpoint:events_x_security': + - soc_timestamp + - host.name + - user.name + - process.executable + - event.action + - event.outcome + - event.dataset server: bindAddress: 0.0.0.0:9822 baseUrl: / From 
c8a95a87069a996543672c50485280394e45226d Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 26 Feb 2024 09:59:07 -0500 Subject: [PATCH 152/777] FEATURE: Add new endpoint dashboards #12428 --- salt/soc/defaults.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index b5fe0e626..c1b3ebabb 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1502,13 +1502,22 @@ soc: query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint - query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory' + query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable' - name: Host File Activity description: File activity captured on an endpoint query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | 
groupby file.name | groupby process.executable' - name: Host Network & Process Mappings description: Network activity mapped to originating processes query: 'event.category: network AND _exists_:process.executable | groupby -sankey event.action host.name | groupby -sankey host.name user.name | groupby event.dataset* event.type* event.action* | groupby host.name | groupby user.name | groupby dns.question.name | groupby process.executable | groupby winlog.event_data.TargetObject | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: Host API Events + description: API (Application Programming Interface) events from endpoints + query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.name | groupby process.Ext.api.name' + - name: Host Library Events + description: Library events from endpoints + query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' + - name: Host Security Events + description: Security events from endpoints + query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.executable | groupby event.action | groupby event.outcome' - name: Strelka description: Strelka file analysis query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.name' From 9a7e2153eedec1fbeb61df3db918ba5b7e7baa39 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 26 Feb 2024 11:01:53 -0500 Subject: [PATCH 153/777] add classification.config --- salt/suricata/classification/classification.config | 2 ++ salt/suricata/config.sls | 7 +++++++ salt/suricata/enabled.sls | 1 + 
salt/suricata/soc_suricata.yaml | 7 +++++++ 4 files changed, 17 insertions(+) create mode 100644 salt/suricata/classification/classification.config diff --git a/salt/suricata/classification/classification.config b/salt/suricata/classification/classification.config new file mode 100644 index 000000000..69918fed7 --- /dev/null +++ b/salt/suricata/classification/classification.config @@ -0,0 +1,2 @@ +# configuration classification: shortname,description,priority +# configuration classification: misc-activity,Misc activity,3 diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 3ec1324bf..00364f384 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -129,6 +129,13 @@ surithresholding: - group: 940 - template: jinja +suriclassifications: + file.managed: + - name: /opt/so/conf/suricata/classification.config + - source: salt://suricata/classification/classification.config + - user: 940 + - group: 940 + # BPF compilation and configuration {% if SURICATABPF %} {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %} diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index ce309e41a..f96472ae2 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -27,6 +27,7 @@ so-suricata: - binds: - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro + - /opt/so/conf/suricata/classification.config:/etc/suricata/classification.config:ro - /opt/so/conf/suricata/rules:/etc/suricata/rules:ro - /opt/so/log/suricata/:/var/log/suricata/:rw - /nsm/suricata/:/nsm/:rw diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 30f277c0a..4fd720ef1 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -11,6 +11,13 @@ suricata: multiline: True title: SIDS helpLink: suricata.html + classification: 
+ classification__config: + description: Classifications config file. + file: True + global: True + multiline: True + helpLink: suricata.html config: af-packet: interface: From f8424f3dad29bed27f0d02006cb3af14f5f76e39 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 26 Feb 2024 11:22:09 -0500 Subject: [PATCH 154/777] Update defaults.yaml --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index c1b3ebabb..d86262fe7 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1502,7 +1502,7 @@ soc: query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint - query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable' + query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' - name: Host File Activity description: File activity captured on an endpoint query: 'event.category: 
file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable' From 1d099f97d2577077607152ae2579e974936db53a Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 26 Feb 2024 11:27:56 -0500 Subject: [PATCH 155/777] Update pattern for endpoint diagnostic template --- salt/elasticsearch/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index f4d8c8a95..8e28f2e41 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -3926,7 +3926,7 @@ elasticsearch: allow_custom_routing: false hidden: false index_patterns: - - logs-endpoint.diagnostic.collection-* + - .logs-endpoint.diagnostic.collection-* priority: 501 template: settings: From 466dac30bbf0f7b6d3d2d065ea8eed3639541ac8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 26 Feb 2024 12:15:17 -0500 Subject: [PATCH 156/777] soup for classifications --- salt/manager/tools/sbin/soup | 15 +++++++++++++++ salt/suricata/soc_suricata.yaml | 1 + 2 files changed, 16 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 90ec636ef..752ae6e21 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -356,6 +356,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30 [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40 [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50 + [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60 true } @@ -371,6 +372,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30 [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40 [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 + [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 true } @@ -427,6 +429,11 @@ post_to_2.4.50() { POSTVERSION=2.4.50 } +post_to_2.4.60() { + echo "Nothing to apply" + 
POSTVERSION=2.4.60 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." @@ -556,6 +563,14 @@ up_to_2.4.50() { INSTALLEDVERSION=2.4.50 } +up_to_2.4.60() { + echo "Creating directory to store Suricata classification.config" + mkdir -vp /opt/so/saltstack/local/salt/suricata/classification + chown socore:socore /opt/so/saltstack/local/salt/suricata/classification + + INSTALLEDVERSION=2.4.60 +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 4fd720ef1..b54a44cbc 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -17,6 +17,7 @@ suricata: file: True global: True multiline: True + title: Classifications helpLink: suricata.html config: af-packet: From 8b7f7933bdfa4e67aa7e3a84dd1c3d03340935c7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 26 Feb 2024 15:29:13 -0500 Subject: [PATCH 157/777] suricata container watch classification.config --- salt/suricata/classification/classification.config | 4 ++-- salt/suricata/enabled.sls | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/suricata/classification/classification.config b/salt/suricata/classification/classification.config index 69918fed7..e597eb5a1 100644 --- a/salt/suricata/classification/classification.config +++ b/salt/suricata/classification/classification.config @@ -1,2 +1,2 @@ -# configuration classification: shortname,description,priority -# configuration classification: misc-activity,Misc activity,3 +# config classification: shortname,description,priority +# config classification: misc-activity,Misc activity,3 diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index f96472ae2..94b95ff5d 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -50,10 +50,12 @@ so-suricata: - file: surithresholding - file: 
/opt/so/conf/suricata/rules/ - file: /opt/so/conf/suricata/bpf + - file: suriclassifications - require: - file: suriconfig - file: surithresholding - file: suribpf + - file: suriclassifications delete_so-suricata_so-status.disabled: file.uncomment: From c6baa4be1baad88a7301e7c08d8c960d865afca0 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 26 Feb 2024 16:19:32 -0500 Subject: [PATCH 158/777] Airgap Support - Detections module --- salt/manager/tools/sbin/soup | 10 ++++++---- salt/soc/enabled.sls | 2 +- salt/soc/merged.map.jinja | 6 ++++++ setup/so-setup | 6 ++---- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 90ec636ef..655e99f6c 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -603,6 +603,10 @@ update_airgap_rules() { if [ -d /nsm/repo/rules/sigma ]; then rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/ fi + + # SOC Detections Airgap + rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/ + rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/ } update_airgap_repo() { @@ -931,10 +935,8 @@ main() { preupgrade_changes echo "" - if [[ $is_airgap -eq 0 ]]; then - echo "Updating Rule Files to the Latest." - update_airgap_rules - fi + echo "Updating Airgap Rule Files to the Latest." + update_airgap_rules # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars if [[ ! "$MINIONID" =~ "_import" ]]; then diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 7c04da825..93ca07ac8 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -22,7 +22,7 @@ so-soc: - sobridge: - ipv4_address: {{ DOCKER.containers['so-soc'].ip }} - binds: - - /nsm/rules:/nsm/rules:rw #Need to tighten this up? 
+ - /nsm/rules:/nsm/rules:rw - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 65091158e..bc7c5cada 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -34,6 +34,12 @@ {% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %} {% endif %} +{# if system is Airgap, don't autoupdate Yara & Sigma rules #} +{% if pillar.global.airgap %} + {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoUpdateEnabled': false}) %} + {% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %} +{% endif %} + {% set standard_actions = SOCMERGED.config.pop('actions') %} {% if pillar.global.endgamehost != '' %} diff --git a/setup/so-setup b/setup/so-setup index ca1581ef9..e2de39f50 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -687,10 +687,8 @@ if ! [[ -f $install_opt_file ]]; then logCmd "so-minion -o=setup" title "Creating Global SLS" - if [[ $is_airgap ]]; then - # Airgap Rules - airgap_rules - fi + # Airgap Rules + airgap_rules manager_pillar From 59af547838100d006f924ad43e302c7aac736bc6 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 27 Feb 2024 09:49:54 -0500 Subject: [PATCH 159/777] Fix download location --- salt/soc/soc_soc.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index fa488ab7a..e80e98a7e 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -83,7 +83,7 @@ soc: global: True advanced: False autoUpdateEnabled: - description: 'Set to true to enable automatic updates of the Sigma Community Ruleset.' + description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false.' 
global: True advanced: True elastic: @@ -146,7 +146,7 @@ soc: advanced: True strelkaengine: autoUpdateEnabled: - description: 'Set to true to enable automatic updates of the Yara ruleset.' + description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.' global: True advanced: True client: From fcc0f9d14f50019dcad5ffc02035173fe50c6bbe Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 27 Feb 2024 13:20:58 -0500 Subject: [PATCH 160/777] redo classifications --- .../classification/classification.config | 53 ++++++++++++++++++- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/salt/suricata/classification/classification.config b/salt/suricata/classification/classification.config index e597eb5a1..220736c94 100644 --- a/salt/suricata/classification/classification.config +++ b/salt/suricata/classification/classification.config @@ -1,2 +1,51 @@ -# config classification: shortname,description,priority -# config classification: misc-activity,Misc activity,3 +# +# config classification:shortname,short description,priority +# + +config classification: not-suspicious,Not Suspicious Traffic,3 +config classification: unknown,Unknown Traffic,3 +config classification: bad-unknown,Potentially Bad Traffic, 2 +config classification: attempted-recon,Attempted Information Leak,2 +config classification: successful-recon-limited,Information Leak,2 +config classification: successful-recon-largescale,Large Scale Information Leak,2 +config classification: attempted-dos,Attempted Denial of Service,2 +config classification: successful-dos,Denial of Service,2 +config classification: attempted-user,Attempted User Privilege Gain,1 +config classification: unsuccessful-user,Unsuccessful User Privilege Gain,1 +config classification: successful-user,Successful User Privilege Gain,1 +config classification: attempted-admin,Attempted Administrator Privilege Gain,1 +config 
classification: successful-admin,Successful Administrator Privilege Gain,1 + +# NEW CLASSIFICATIONS +config classification: rpc-portmap-decode,Decode of an RPC Query,2 +config classification: shellcode-detect,Executable code was detected,1 +config classification: string-detect,A suspicious string was detected,3 +config classification: suspicious-filename-detect,A suspicious filename was detected,2 +config classification: suspicious-login,An attempted login using a suspicious username was detected,2 +config classification: system-call-detect,A system call was detected,2 +config classification: tcp-connection,A TCP connection was detected,4 +config classification: trojan-activity,A Network Trojan was detected, 1 +config classification: unusual-client-port-connection,A client was using an unusual port,2 +config classification: network-scan,Detection of a Network Scan,3 +config classification: denial-of-service,Detection of a Denial of Service Attack,2 +config classification: non-standard-protocol,Detection of a non-standard protocol or event,2 +config classification: protocol-command-decode,Generic Protocol Command Decode,3 +config classification: web-application-activity,access to a potentially vulnerable web application,2 +config classification: web-application-attack,Web Application Attack,1 +config classification: misc-activity,Misc activity,3 +config classification: misc-attack,Misc Attack,2 +config classification: icmp-event,Generic ICMP event,3 +config classification: inappropriate-content,Inappropriate Content was Detected,1 +config classification: policy-violation,Potential Corporate Privacy Violation,1 +config classification: default-login-attempt,Attempt to login by a default username and password,2 + +# Update +config classification: targeted-activity,Targeted Malicious Activity was Detected,1 +config classification: exploit-kit,Exploit Kit Activity Detected,1 +config classification: external-ip-check,Device Retrieving External IP Address Detected,2 
+config classification: domain-c2,Domain Observed Used for C2 Detected,1 +config classification: pup-activity,Possibly Unwanted Program Detected,2 +config classification: credential-theft,Successful Credential Theft Detected,1 +config classification: social-engineering,Possible Social Engineering Attempted,2 +config classification: coin-mining,Crypto Currency Mining Activity Detected,2 +config classification: command-and-control,Malware Command and Control Activity Detected,1 From df3943b4651e1bf293e0433d2138dece8ffc829b Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 27 Feb 2024 17:24:27 -0500 Subject: [PATCH 161/777] Daily rollover --- salt/elasticsearch/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 8e28f2e41..39d218564 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -10597,7 +10597,7 @@ elasticsearch: hot: actions: rollover: - max_age: 30d + max_age: 1d max_primary_shard_size: 50gb set_priority: priority: 100 From e2dd0f8cf17950a6b35d66419085adb167a28e34 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 28 Feb 2024 09:39:23 -0500 Subject: [PATCH 162/777] Only update rule files if AG --- salt/manager/tools/sbin/soup | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 655e99f6c..ba8316116 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -935,8 +935,10 @@ main() { preupgrade_changes echo "" - echo "Updating Airgap Rule Files to the Latest." - update_airgap_rules + if [[ $is_airgap -eq 0 ]]; then + echo "Updating Rule Files to the Latest." + update_airgap_rules + fi # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars if [[ ! 
"$MINIONID" =~ "_import" ]]; then From 53761d4dba278243222822f1a00a1ec9ba033891 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:15:26 -0500 Subject: [PATCH 163/777] FIX: EA installers not downloadable from SOC + fix stg logging Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/stig/enabled.sls | 21 ++++++++++++--------- salt/stig/files/sos-oscap.xml | 6 +++--- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/salt/stig/enabled.sls b/salt/stig/enabled.sls index 1f1a064fd..0f347f8bc 100644 --- a/salt/stig/enabled.sls +++ b/salt/stig/enabled.sls @@ -48,15 +48,17 @@ update_stig_profile: {% if not salt['file.file_exists'](OSCAP_OUTPUT_DIR ~ '/pre-oscap-report.html') %} run_initial_scan: - module.run: - - name: openscap.xccdf - - params: 'eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' + cmd.run: + - name: 'oscap xccdf eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' + - success_retcodes: + - 2 {% endif %} run_remediate: - module.run: - - name: openscap.xccdf - - params: 'eval --remediate --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ OSCAP_PROFILE_LOCATION }}' + cmd.run: + - name: 'oscap xccdf eval --remediate --profile {{ OSCAP_PROFILE_NAME }} {{ OSCAP_PROFILE_LOCATION }}' + - success_retcodes: + - 2 {# OSCAP rule id: xccdf_org.ssgproject.content_rule_disable_ctrlaltdel_burstaction #} disable_ctrl_alt_del_action: @@ -82,9 +84,10 @@ remove_nullok_from_system_auth_auth: - backup: '.bak' run_post_scan: - module.run: - - name: openscap.xccdf - - params: 'eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ 
OSCAP_OUTPUT_DIR }}/post-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' + cmd.run: + - name: 'oscap xccdf eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/post-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' + - success_retcodes: + - 2 {% else %} {{sls}}_no_license_detected: diff --git a/salt/stig/files/sos-oscap.xml b/salt/stig/files/sos-oscap.xml index 3f78af8c0..6c4c93778 100644 --- a/salt/stig/files/sos-oscap.xml +++ b/salt/stig/files/sos-oscap.xml @@ -611,7 +611,7 @@ the release. Additionally, the original security profile has been modified by Se - + @@ -1007,8 +1007,8 @@ the release. Additionally, the original security profile has been modified by Se - - + + From b017157d21a7b32a8163b7593d2279a06d3a776f Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 1 Mar 2024 14:04:56 -0500 Subject: [PATCH 164/777] Add antivirus mapping --- salt/soc/files/soc/sigma_so_pipeline.yaml | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index 533823e6f..37e9f4a3e 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -16,7 +16,25 @@ transformations: src_port: source.port dst_ip: destination.ip.keyword dst_port: destination.port - winlog.event_data.User: user.name + winlog.event_data.User: user.name + # Maps "antivirus" category to Windows Defender logs shipped by Elastic Agent Winlog Integration + # winlog.event_data.threat_name has to be renamed prior to ingestion, it is originally winlog.event_data.Threat Name + - id: antivirus_field-mappings_windows-defender + type: field_name_mapping + mapping: + Signature: winlog.event_data.threat_name + rule_conditions: + - type: logsource + category: antivirus + - id: antivirus_add-fields_windows-defender + type: add_condition + conditions: + winlog.channel: 'Microsoft-Windows-Windows 
Defender/Operational' + winlog.provider_name: 'Microsoft-Windows-Windows Defender' + event.code: "1116" + rule_conditions: + - type: logsource + category: antivirus - id: hashes_process-creation type: field_name_mapping mapping: From d832158cc52fe7c87d88fe233c38128d425d0a2f Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 1 Mar 2024 15:26:02 -0500 Subject: [PATCH 165/777] Drop Hashes field --- salt/soc/files/soc/sigma_so_pipeline.yaml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index 37e9f4a3e..d227c3f01 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -35,6 +35,17 @@ transformations: rule_conditions: - type: logsource category: antivirus + # Drops the Hashes field which is specific to Sysmon logs + # Ingested sysmon logs will have the Hashes field mapped to ECS specific fields + - id: hashes_drop_sysmon-specific-field + type: drop_detection_item + field_name_conditions: + - type: include_fields + fields: + - winlog.event_data.Hashes + rule_conditions: + - type: logsource + product: windows - id: hashes_process-creation type: field_name_mapping mapping: @@ -67,4 +78,4 @@ transformations: rule_conditions: - type: logsource product: windows - category: driver_load \ No newline at end of file + category: driver_load From f28f269bb120195d00066d7b7fcf0304906c8e86 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 4 Mar 2024 07:38:32 -0500 Subject: [PATCH 166/777] Fix FIM --- .../tools/sbin_jinja/so-elastic-agent-gen-installers | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers index 275bc6a11..ff46a3e07 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers +++ 
b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers @@ -46,7 +46,7 @@ do done printf "\n### Stripping out unused components" -find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete +find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*heart.*\|.*cloud.*' -delete printf "\n### Tarring everything up again" for OS in "${OSARCH[@]}" From 018e099111e86774e51126b42d22253b73ea7495 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 4 Mar 2024 14:53:15 -0500 Subject: [PATCH 167/777] Modify setup --- salt/manager/tools/sbin/so-minion | 39 +++++++++++++++++++------------ 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 4995e1c9d..4a6e5b7c7 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -80,28 +80,27 @@ function getinstallinfo() { } function pcapspace() { - - local NSMSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') - local ROOTSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') - - if [[ "$NSMSIZE" == "null" ]]; then - # Looks like there is no dedicated nsm partition. Using root - local SPACESIZE=$ROOTSIZE + if [[ "$OPERATION" == "setup" ]]; then + local SPACESIZE=$(df -h /nsm | tail -1 | awk '{print $2}') else - local SPACESIZE=$NSMSIZE + + local NSMSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') + local ROOTSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') + + if [[ "$NSMSIZE" == "null" ]]; then + # Looks like there is no dedicated nsm partition. 
Using root + local SPACESIZE=$ROOTSIZE + else + local SPACESIZE=$NSMSIZE + fi fi local s=$(( $SPACESIZE / 1000000 )) local s1=$(( $s / 2 )) local s2=$(( $s1 / $lb_procs )) - printf '%s\n'\ - "suricata:"\ - " config:"\ - " output:"\ - " pcap-log:"\ - " max-files: $s" >> $PILLARFILE - + MAXPCAPFILES=$s2 + } function testMinion() { @@ -272,6 +271,11 @@ function add_sensor_to_minion() { echo " config:" >> $PILLARFILE echo " af-packet:" >> $PILLARFILE echo " threads: '$CORECOUNT'" >> $PILLARFILE + if [[ $is_pcaplimit ]]; then + echo " output:" >> $PILLARFILE + echo " pcap-log:" >> $PILLARFILE + echo " max-files: '$MAXPCAPFILES'" >> $PILLARFILE + fi echo "pcap:" >> $PILLARFILE echo " enabled: True" >> $PILLARFILE if [[ $is_pcaplimit ]]; then @@ -448,6 +452,7 @@ function updateMine() { function createEVAL() { is_pcaplimit=true + pcapspace add_elasticsearch_to_minion add_sensor_to_minion add_strelka_to_minion @@ -468,6 +473,7 @@ function createEVAL() { function createSTANDALONE() { is_pcaplimit=true + pcapspace add_elasticsearch_to_minion add_logstash_to_minion add_sensor_to_minion @@ -557,6 +563,7 @@ function createIDH() { function createHEAVYNODE() { is_pcaplimit=true + pcapspace add_elasticsearch_to_minion add_elastic_agent_to_minion add_logstash_to_minion @@ -567,6 +574,8 @@ function createHEAVYNODE() { } function createSENSOR() { + is_pcaplimit=true + pcapspace add_sensor_to_minion add_strelka_to_minion add_telegraf_to_minion From fe238755e981fdc3316c48f03c23c9a6a29613ca Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 4 Mar 2024 16:52:51 -0500 Subject: [PATCH 168/777] Fix df --- salt/manager/tools/sbin/so-minion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 4a6e5b7c7..d696e14c6 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -81,7 +81,7 @@ function getinstallinfo() { function pcapspace() { if [[ "$OPERATION" == "setup" ]]; 
then - local SPACESIZE=$(df -h /nsm | tail -1 | awk '{print $2}') + local SPACESIZE=$(df -k /nsm | tail -1 | awk '{print $2}' | tr -d \n) else local NSMSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') From b64d61065a7cf910a1c53d28485bfbee21edb4e9 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 09:19:43 -0500 Subject: [PATCH 169/777] Add AWS Cloudfront template --- salt/elasticsearch/defaults.yaml | 44 ++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 39d218564..0d2dd8a41 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1107,6 +1107,50 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-aws_x_cloudfront_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-aws.cloudfront_logs-*" + template: + settings: + index: + lifecycle: + name: so-logs-aws.cloudfront_logs-logs + number_of_replicas: 0 + composed_of: + - "logs-aws.cloudfront_logs@package" + - "logs-aws.cloudfront_logs@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-aws_x_cloudtrail: index_sorting: false index_template: From 1514f1291e2961dedd91354c0593ffa6e0854023 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 09:21:48 -0500 Subject: [PATCH 170/777] Add AWS GuardDuty template --- salt/elasticsearch/defaults.yaml | 44 ++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml 
b/salt/elasticsearch/defaults.yaml index 0d2dd8a41..54a65a112 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1371,6 +1371,50 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-aws_x_guardduty: + index_sorting: False + index_template: + index_patterns: + - "logs-aws.guardduty-*" + template: + settings: + index: + lifecycle: + name: so-logs-aws.guardduty-logs + number_of_replicas: 0 + composed_of: + - "logs-aws.guardduty@package" + - "logs-aws.guardduty@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-aws_x_route53_public_logs: index_sorting: false index_template: From d85ac39e2875dac3a58930abd2523f5be7af6ece Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 09:23:17 -0500 Subject: [PATCH 171/777] Add AWS Inspector template --- salt/elasticsearch/defaults.yaml | 44 ++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 54a65a112..8d31a1acd 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1415,6 +1415,50 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-aws_x_inspector: + index_sorting: False + index_template: + index_patterns: + - "logs-aws.inspector-*" + template: + settings: + index: + lifecycle: + name: so-logs-aws.inspector-logs + number_of_replicas: 0 + composed_of: + - "logs-aws.inspector@package" + - "logs-aws.inspector@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + 
data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-aws_x_route53_public_logs: index_sorting: false index_template: From d8e8933ea0a035e0628a0bbcc65096a0c0a00b01 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 09:25:41 -0500 Subject: [PATCH 172/777] Add AWS Security Hub template --- salt/elasticsearch/defaults.yaml | 88 ++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 8d31a1acd..2274018b1 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1591,6 +1591,94 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-aws_x_securityhub_findings: + index_sorting: False + index_template: + index_patterns: + - "logs-aws.securityhub_findings-*" + template: + settings: + index: + lifecycle: + name: so-logs-aws.securityhub_findings-logs + number_of_replicas: 0 + composed_of: + - "logs-aws.securityhub_findings@package" + - "logs-aws.securityhub_findings@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_securityhub_insights: + index_sorting: False + index_template: + index_patterns: + - "logs-aws.securityhub_insights-*" + template: + settings: + index: + 
lifecycle: + name: so-logs-aws.securityhub_insights-logs + number_of_replicas: 0 + composed_of: + - "logs-aws.securityhub_insights@package" + - "logs-aws.securityhub_insights@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-aws_x_vpcflow: index_sorting: false index_template: From 2a7e5b096f0320dec1d395bf3fe3c5721582283f Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 09:48:59 -0500 Subject: [PATCH 173/777] Change version for foxtrot --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 5a99ed019..7d52aac7f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.60 +2.4.0-foxtrot From bed42208b1856f1e02033ac483a0610fa7adb76e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 09:49:55 -0500 Subject: [PATCH 174/777] Add journald integration --- salt/elasticfleet/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index e4f54ceb0..7b2d9d6a3 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -65,6 +65,7 @@ elasticfleet: - http_endpoint - httpjson - iis + - journald - juniper - juniper_srx - kafka_log From 08f2b8251b95638ef5611e203bd476f8e259c74c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 5 Mar 2024 09:53:35 -0500 Subject: [PATCH 175/777] add GLOBALS.is_sensor --- salt/vars/globals.map.jinja | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja index 624173217..ed7129678 100644 --- 
a/salt/vars/globals.map.jinja +++ b/salt/vars/globals.map.jinja @@ -8,6 +8,7 @@ set GLOBALS = { 'hostname': INIT.GRAINS.nodename, 'is_manager': false, + 'is_sensor': false, 'manager': INIT.GRAINS.master, 'minion_id': INIT.GRAINS.id, 'main_interface': INIT.PILLAR.host.mainint, @@ -63,5 +64,8 @@ {% do GLOBALS.update({'is_manager': true}) %} {% endif %} +{% if GLOBALS.role in GLOBALS.sensor_roles %} +{% do GLOBALS.update({'is_sensor': true}) %} +{% endif %} {% do salt['defaults.merge'](GLOBALS, ROLE_GLOBALS, merge_lists=False, in_place=True) %} From 1a58aa61a0409889194fe7f427078c7de5623aff Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 5 Mar 2024 09:54:40 -0500 Subject: [PATCH 176/777] only import pcap and suricata if sensor --- salt/sensoroni/files/sensoroni.json | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index 97c91f0b3..93708440a 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -1,7 +1,5 @@ -{%- from 'vars/globals.map.jinja' import GLOBALS %} -{%- from 'sensoroni/map.jinja' import SENSORONIMERGED %} -{%- from 'pcap/config.map.jinja' import PCAPMERGED %} -{%- from 'suricata/map.jinja' import SURICATAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{%- from 'sensoroni/map.jinja' import SENSORONIMERGED -%} { "logFilename": "/opt/sensoroni/logs/sensoroni.log", "logLevel":"info", @@ -24,24 +22,28 @@ "importer": {}, "statickeyauth": { "apiKey": "{{ GLOBALS.sensoroni_key }}" -{#- if PCAPMERGED.enabled is true then we know that steno is the pcap engine #} -{#- if it is false, then user has steno disabled in ui or has selected suricata for pcap engine #} -{%- if PCAPMERGED.enabled %} +{% if GLOBALS.is_sensor %} +{% from 'pcap/config.map.jinja' import PCAPMERGED %} +{% from 'suricata/map.jinja' import SURICATAMERGED %} +{# if PCAPMERGED.enabled is true then we know that steno is the pcap 
engine #} +{# if it is false, then user has steno disabled in ui or has selected suricata for pcap engine #} +{%- if PCAPMERGED.enabled %} }, "stenoquery": { "executablePath": "/opt/sensoroni/scripts/stenoquery.sh", "pcapInputPath": "/nsm/pcap", "pcapOutputPath": "/nsm/pcapout" } -{%- elif GLOBALS.pcap_engine == "SURICATA" and SURICATAMERGED.enabled %} +{%- elif GLOBALS.pcap_engine == "SURICATA" and SURICATAMERGED.enabled %} }, "suriquery": { "executablePath": "/opt/sensoroni/scripts/suriquery.sh", "pcapInputPath": "/nsm/suripcap", "pcapOutputPath": "/nsm/pcapout" } -{%- else %} +{%- else %} } +{% endif %} {%- endif %} } } From c0d19e11b9bad6ee1ebb93088f42f3e44b0bc13c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 5 Mar 2024 10:07:32 -0500 Subject: [PATCH 177/777] fix } placement --- salt/sensoroni/files/sensoroni.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index 93708440a..5090967ef 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -41,9 +41,9 @@ "pcapInputPath": "/nsm/suripcap", "pcapOutputPath": "/nsm/pcapout" } -{%- else %} +{% endif %} +{%- else %} } -{% endif %} {%- endif %} } } From b9ebe6c40b2545bfb9c0d18e75d2be67c5f3d9f2 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 5 Mar 2024 12:58:34 -0500 Subject: [PATCH 178/777] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 7d52aac7f..5a99ed019 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-foxtrot +2.4.60 From 6eb608c3f53f2a9b6743d02eebe080c469343995 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 5 Mar 2024 15:05:03 -0500 Subject: [PATCH 179/777] Update so-minion --- salt/manager/tools/sbin/so-minion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index d696e14c6..82c19e39b 100755 --- 
a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -96,7 +96,7 @@ function pcapspace() { fi local s=$(( $SPACESIZE / 1000000 )) - local s1=$(( $s / 2 )) + local s1=$(( $s / 4 )) local s2=$(( $s1 / $lb_procs )) MAXPCAPFILES=$s2 From a686d46322ed335c8a7fd4220843e823511f2769 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 5 Mar 2024 15:09:02 -0500 Subject: [PATCH 180/777] Update so-minion --- salt/manager/tools/sbin/so-minion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 82c19e39b..7e33533b4 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -280,7 +280,7 @@ function add_sensor_to_minion() { echo " enabled: True" >> $PILLARFILE if [[ $is_pcaplimit ]]; then echo " config:" >> $PILLARFILE - echo " diskfreepercentage: 60" >> $PILLARFILE + echo " diskfreepercentage: 75" >> $PILLARFILE pcapspace fi echo " " >> $PILLARFILE From 4b5f00cef4d13a21ac8ff635a8ec625151e33e07 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 5 Mar 2024 16:42:20 -0500 Subject: [PATCH 181/777] fix oinkcodes with leading zeros --- salt/idstools/soc_idstools.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index f8ec3b8b6..3e3a68117 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -6,6 +6,7 @@ idstools: description: Enter your registration code or oinkcode for paid NIDS rulesets. title: Registration Code global: True + forcedType: string helpLink: rules.html ruleset: description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' 
From 5687fdcf578eb44aa11f72646a11c284ea57838e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 5 Mar 2024 17:46:43 -0500 Subject: [PATCH 182/777] fix pcapspace function --- salt/manager/tools/sbin/so-minion | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index d696e14c6..54587774f 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -84,8 +84,8 @@ function pcapspace() { local SPACESIZE=$(df -k /nsm | tail -1 | awk '{print $2}' | tr -d \n) else - local NSMSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') - local ROOTSIZE=$(salt '$MINION_ID' disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') + local NSMSIZE=$(salt "$MINION_ID" disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ') + local ROOTSIZE=$(salt "$MINION_ID" disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ') if [[ "$NSMSIZE" == "null" ]]; then # Looks like there is no dedicated nsm partition. 
Using root @@ -97,7 +97,7 @@ function pcapspace() { local s=$(( $SPACESIZE / 1000000 )) local s1=$(( $s / 2 )) - local s2=$(( $s1 / $lb_procs )) + local s2=$(( $s1 / $CORECOUNT )) MAXPCAPFILES=$s2 From eaef076eba34104b5495aef002b8bab3e1b8422b Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Tue, 5 Mar 2024 17:52:24 -0500 Subject: [PATCH 183/777] Update so-minion --- salt/manager/tools/sbin/so-minion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 6f037d344..ab05fafcc 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -96,8 +96,8 @@ function pcapspace() { fi local s=$(( $SPACESIZE / 1000000 )) - local s2=$(( $s1 / $CORECOUNT )) local s1=$(( $s / 4 )) + local s2=$(( $s1 / $CORECOUNT )) MAXPCAPFILES=$s2 From 1b47537a3f2d808e8cc9176fbfe636edadea6c7f Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 6 Mar 2024 07:16:50 -0500 Subject: [PATCH 184/777] Add Exclusion toggle --- salt/soc/defaults.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index d5ecbe71f..60f2ee613 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1201,6 +1201,9 @@ soc: - name: caseExcludeToggle filter: 'NOT _index:"*:so-case*"' enabled: true + - name: detectionsExcludeToggle + filter: 'NOT _index:"*:so-detection*"' + enabled: true - name: socExcludeToggle filter: 'NOT event.module:"soc"' enabled: true @@ -1471,6 +1474,9 @@ soc: - name: caseExcludeToggle filter: 'NOT _index:"*:so-case*"' enabled: true + - name: detectionsExcludeToggle + filter: 'NOT _index:"*:so-detection*"' + enabled: true - name: socExcludeToggle filter: 'NOT event.module:"soc"' enabled: true From 12653eec8c4565e9d2538c803fc9a8e53edfb1c6 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 6 Mar 2024 08:14:33 -0500 Subject: [PATCH 185/777] add new pcap annotations --- salt/sensoroni/defaults.yaml | 118 
++++++++++++++-------------- salt/sensoroni/files/sensoroni.json | 3 +- salt/sensoroni/soc_sensoroni.yaml | 5 ++ salt/soc/defaults.yaml | 1 + salt/soc/soc_soc.yaml | 3 + 5 files changed, 71 insertions(+), 59 deletions(-) diff --git a/salt/sensoroni/defaults.yaml b/salt/sensoroni/defaults.yaml index 4ebd666a9..7777985dd 100644 --- a/salt/sensoroni/defaults.yaml +++ b/salt/sensoroni/defaults.yaml @@ -1,58 +1,60 @@ -sensoroni: - enabled: False - config: - analyze: - enabled: False - timeout_ms: 900000 - parallel_limit: 5 - node_checkin_interval_ms: 10000 - sensoronikey: - soc_host: - analyzers: - echotrail: - base_url: https://api.echotrail.io/insights/ - api_key: - elasticsearch: - base_url: - auth_user: - auth_pwd: - num_results: 10 - api_key: - index: _all - time_delta_minutes: 14400 - timestamp_field_name: '@timestamp' - map: {} - cert_path: - emailrep: - base_url: https://emailrep.io/ - api_key: - greynoise: - base_url: https://api.greynoise.io/ - api_key: - api_version: community - localfile: - file_path: [] - otx: - base_url: https://otx.alienvault.com/api/v1/ - api_key: - pulsedive: - base_url: https://pulsedive.com/api/ - api_key: - spamhaus: - lookup_host: zen.spamhaus.org - nameservers: [] - sublime_platform: - base_url: https://api.platform.sublimesecurity.com - api_key: - live_flow: False - mailbox_email_address: - message_source_id: - urlscan: - base_url: https://urlscan.io/api/v1/ - api_key: - enabled: False - visibility: public - timeout: 180 - virustotal: - base_url: https://www.virustotal.com/api/v3/search?query= - api_key: +sensoroni: + enabled: False + config: + analyze: + enabled: False + timeout_ms: 900000 + parallel_limit: 5 + node_checkin_interval_ms: 10000 + sensoronikey: + soc_host: + suripcap: + pcapMaxCount: 999999 + analyzers: + echotrail: + base_url: https://api.echotrail.io/insights/ + api_key: + elasticsearch: + base_url: + auth_user: + auth_pwd: + num_results: 10 + api_key: + index: _all + time_delta_minutes: 14400 + 
timestamp_field_name: '@timestamp' + map: {} + cert_path: + emailrep: + base_url: https://emailrep.io/ + api_key: + greynoise: + base_url: https://api.greynoise.io/ + api_key: + api_version: community + localfile: + file_path: [] + otx: + base_url: https://otx.alienvault.com/api/v1/ + api_key: + pulsedive: + base_url: https://pulsedive.com/api/ + api_key: + spamhaus: + lookup_host: zen.spamhaus.org + nameservers: [] + sublime_platform: + base_url: https://api.platform.sublimesecurity.com + api_key: + live_flow: False + mailbox_email_address: + message_source_id: + urlscan: + base_url: https://urlscan.io/api/v1/ + api_key: + enabled: False + visibility: public + timeout: 180 + virustotal: + base_url: https://www.virustotal.com/api/v3/search?query= + api_key: diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index 5090967ef..eb9c1131d 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -39,7 +39,8 @@ "suriquery": { "executablePath": "/opt/sensoroni/scripts/suriquery.sh", "pcapInputPath": "/nsm/suripcap", - "pcapOutputPath": "/nsm/pcapout" + "pcapOutputPath": "/nsm/pcapout", + "pcapMaxCount": {{ SENSORONIMERGED.config.suripcap.pcapMaxCount }} } {% endif %} {%- else %} diff --git a/salt/sensoroni/soc_sensoroni.yaml b/salt/sensoroni/soc_sensoroni.yaml index f7a10c6f7..7b8495dc5 100644 --- a/salt/sensoroni/soc_sensoroni.yaml +++ b/salt/sensoroni/soc_sensoroni.yaml @@ -37,6 +37,11 @@ sensoroni: helpLink: grid.html global: True advanced: True + suripcap: + pcapMaxCount: + description: The maximum number of PCAP packets to extract from eligible PCAP files, for PCAP jobs. If there are issues fetching excessively large packet streams consider lowering this value to reduce the number of collected packets returned to the user interface. 
+ helpLink: sensoroni.html + advanced: True analyzers: echotrail: api_key: diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index d5ecbe71f..abcd12308 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1103,6 +1103,7 @@ soc: esSearchOffsetMs: 1800000 maxLogLength: 1024 asyncThreshold: 10 + lookupTunnelParent: true influxdb: hostUrl: token: diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index e80e98a7e..645a4c8a9 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -126,6 +126,9 @@ soc: description: Maximum number of events that can be acknowledged synchronously. When acknowledging large numbers of events, where the count exceeds this value, the acknowledge update will be performed in the background, as it can take several minutes to complete. global: True advanced: True + lookupTunnelParent: + description: When true, if a pivoted event appears to be encapsulated, such as in a VxLan packet, then SOC will pivot to the VxLan packet stream. This can be useful if the PCAP parser is unable to locate the encapsulated packets. However, if the parser is written in a way that it can find a given filter even if its encapsulated, this is best left to false, as the analyst will see the intended packet data rather than the wrapping packet data. + global: True sostatus: refreshIntervalMs: description: Duration (in milliseconds) between refreshes of the grid status. Shortening this duration may not have expected results, as the backend systems feeding this sostatus data will continue their updates as scheduled. 
From 0f12297f5019e91def5af525ffcd113108dc9e29 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 6 Mar 2024 08:19:42 -0500 Subject: [PATCH 186/777] add new pcap annotations --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 645a4c8a9..799d9af4e 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -127,7 +127,7 @@ soc: global: True advanced: True lookupTunnelParent: - description: When true, if a pivoted event appears to be encapsulated, such as in a VxLan packet, then SOC will pivot to the VxLan packet stream. This can be useful if the PCAP parser is unable to locate the encapsulated packets. However, if the parser is written in a way that it can find a given filter even if its encapsulated, this is best left to false, as the analyst will see the intended packet data rather than the wrapping packet data. + description: When true, if a pivoted event appears to be encapsulated, such as in a VXLAN packet, then SOC will pivot to the VXLAN packet stream. When false, SOC will attempt to pivot to the encapsulated packet stream itself, but at the risk that it may be unable to locate it in the stored PCAP data. 
global: True sostatus: refreshIntervalMs: From f58c104d899bf99fa1d3b566797eb2447a909917 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 6 Mar 2024 09:51:56 -0500 Subject: [PATCH 187/777] Update so-minion --- salt/manager/tools/sbin/so-minion | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index ab05fafcc..09708707f 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -81,6 +81,8 @@ function getinstallinfo() { function pcapspace() { if [[ "$OPERATION" == "setup" ]]; then + # Use 25% for PCAP + PCAP_PERCENTAGE=1 local SPACESIZE=$(df -k /nsm | tail -1 | awk '{print $2}' | tr -d \n) else @@ -96,10 +98,9 @@ function pcapspace() { fi local s=$(( $SPACESIZE / 1000000 )) - local s1=$(( $s / 4 )) - local s2=$(( $s1 / $CORECOUNT )) + local s1=$(( $s / 4 * $PCAP_PERCENTAGE )) - MAXPCAPFILES=$s2 + MAX_PCAP_SPACE=$s1 } @@ -271,17 +272,12 @@ function add_sensor_to_minion() { echo " config:" >> $PILLARFILE echo " af-packet:" >> $PILLARFILE echo " threads: '$CORECOUNT'" >> $PILLARFILE - if [[ $is_pcaplimit ]]; then - echo " output:" >> $PILLARFILE - echo " pcap-log:" >> $PILLARFILE - echo " max-files: '$MAXPCAPFILES'" >> $PILLARFILE - fi echo "pcap:" >> $PILLARFILE echo " enabled: True" >> $PILLARFILE if [[ $is_pcaplimit ]]; then echo " config:" >> $PILLARFILE echo " diskfreepercentage: 75" >> $PILLARFILE - pcapspace + echo " suripcapmaxsize: $MAX_PCAP_SPACE" >> $PILLARFILE fi echo " " >> $PILLARFILE } @@ -563,6 +559,7 @@ function createIDH() { function createHEAVYNODE() { is_pcaplimit=true + PCAP_PERCENTAGE=1 pcapspace add_elasticsearch_to_minion add_elastic_agent_to_minion @@ -575,6 +572,7 @@ function createHEAVYNODE() { function createSENSOR() { is_pcaplimit=true + PCAP_PERCENTAGE=3 pcapspace add_sensor_to_minion add_strelka_to_minion From a63fca727ce144a90d6fd01a82707ecfcf32a39f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 
6 Mar 2024 10:02:06 -0500 Subject: [PATCH 188/777] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 4f9a80d86..fbd6e84ee 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -188,17 +188,11 @@ suricata: advanced: True readonly: True helpLink: suricata.html - limit: - description: File size limit per thread. To determine max PCAP size multiple threads x max-files x limit. - helpLink: suricata.html mode: description: Suricata PCAP mode. Currently only multi is supported. advanced: True readonly: True helpLink: suricata.html - max-files: - description: Max PCAP files per thread. To determine max PCAP size multiple threads x max-files x limit. - helpLink: suricata.html use-stream-depth: description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. 
advanced: True From 4dfa1a5626387ab70c389565fa74a542cc44949b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 6 Mar 2024 10:35:10 -0500 Subject: [PATCH 190/777] Move Suricata around --- salt/suricata/defaults.yaml | 5 +++-- salt/suricata/soc_suricata.yaml | 8 ++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index eb2c181e3..42af3fc55 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -1,5 +1,8 @@ suricata: enabled: False + pcap: + filesize: 1000mb + maxsize: 25 config: threading: set-cpu-affinity: "no" @@ -132,9 +135,7 @@ suricata: lz4-checksum: "no" lz4-level: 8 filename: "%n/so-pcap.%t" - limit: "1000mb" mode: "multi" - max-files: 10 use-stream-depth: "no" conditional: "all" dir: "/nsm/suripcap" diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index fbd6e84ee..88b460af8 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -19,6 +19,14 @@ suricata: multiline: True title: Classifications helpLink: suricata.html + pcap: + filesize: + description: Max file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval times. + advanced: True + helplink: suricata.html + maxsize: + description: Size in GB for total usage size of PCAP on disk. 
+ helplink: suricata.html config: af-packet: interface: From 167aff24f61b8bbcaced10e05f8dfde3dd4acf0a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 6 Mar 2024 11:03:52 -0500 Subject: [PATCH 191/777] detections annotations --- salt/soc/defaults.yaml | 1 + salt/soc/soc_soc.yaml | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index abcd12308..ad1f8bb5f 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1147,6 +1147,7 @@ soc: tipTimeoutMs: 6000 cacheExpirationMs: 300000 casesEnabled: true + detectionsEnabled: false inactiveTools: ['toolUnused'] tools: - name: toolKibana diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 799d9af4e..08a29766d 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -79,11 +79,11 @@ soc: modules: elastalertengine: sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone. (future use, not yet complete)' global: True advanced: False autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false.' + description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false. 
(future use, not yet complete)' global: True advanced: True elastic: @@ -149,7 +149,7 @@ soc: advanced: True strelkaengine: autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.' + description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false. (future use, not yet complete)' global: True advanced: True client: @@ -174,6 +174,9 @@ soc: casesEnabled: description: Set to true to enable case management in SOC. global: True + detectionsEnabled: + description: Set to true to enable the Detections module in SOC. (future use, not yet complete) + global: True inactiveTools: description: List of external tools to remove from the SOC UI. global: True From ad120934295999845436cdcb303ac3ca51c621e6 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 6 Mar 2024 11:05:06 -0500 Subject: [PATCH 192/777] Fix percent calc --- salt/manager/tools/sbin/so-minion | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 09708707f..a3d8230b5 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -83,6 +83,7 @@ function pcapspace() { if [[ "$OPERATION" == "setup" ]]; then # Use 25% for PCAP PCAP_PERCENTAGE=1 + DFREEPERCENT=75 local SPACESIZE=$(df -k /nsm | tail -1 | awk '{print $2}' | tr -d \n) else @@ -269,6 +270,10 @@ function add_sensor_to_minion() { echo " lb_procs: '$CORECOUNT'" >> $PILLARFILE echo "suricata:" >> $PILLARFILE echo " enabled: True " >> $PILLARFILE + if [[ $is_pcaplimit ]]; then + echo " pcap:" >> $PILLARFILE + echo " maxsize: $MAX_PCAP_SPACE" >> $PILLARFILE + fi echo " config:" >> $PILLARFILE echo " af-packet:" >> $PILLARFILE echo " threads: '$CORECOUNT'" >> $PILLARFILE @@ -276,8 +281,7 @@ 
function add_sensor_to_minion() { echo " enabled: True" >> $PILLARFILE if [[ $is_pcaplimit ]]; then echo " config:" >> $PILLARFILE - echo " diskfreepercentage: 75" >> $PILLARFILE - echo " suripcapmaxsize: $MAX_PCAP_SPACE" >> $PILLARFILE + echo " diskfreepercentage: $DFREEPERCENT" >> $PILLARFILE fi echo " " >> $PILLARFILE } @@ -560,6 +564,7 @@ function createIDH() { function createHEAVYNODE() { is_pcaplimit=true PCAP_PERCENTAGE=1 + DFREEPERCENT=75 pcapspace add_elasticsearch_to_minion add_elastic_agent_to_minion @@ -572,6 +577,7 @@ function createHEAVYNODE() { function createSENSOR() { is_pcaplimit=true + DFREEPERCENT=10 PCAP_PERCENTAGE=3 pcapspace add_sensor_to_minion From 1cbac11fae137961e67155122856a816989c8a60 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 6 Mar 2024 11:08:03 -0500 Subject: [PATCH 193/777] detections annotations --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 08a29766d..cb939f758 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -34,7 +34,7 @@ soc: helpLink: soc-customization.html sigma_final_pipeline__yaml: title: Final Sigma Pipeline - description: Final Processing Pipeline for Sigma Rules + description: Final Processing Pipeline for Sigma Rules (future use, not yet complete) syntax: yaml file: True global: True From 9a413a2e3189aa24bc654c9ebe388e16153b898f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 6 Mar 2024 12:42:22 -0500 Subject: [PATCH 194/777] Fix location of repo --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index ef1df4a71..4aae0f5bd 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1811,8 +1811,8 @@ repo_sync_local() { info "Adding Repo Download Configuration" mkdir -p /nsm/repo mkdir -p /opt/so/conf/reposync/cache - echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9.3" > 
/opt/so/conf/reposync/mirror.txt - echo "https://repo-alt.securityonion.net/prod/2.4/oracle/9.3" >> /opt/so/conf/reposync/mirror.txt + echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9" > /opt/so/conf/reposync/mirror.txt + echo "https://repo-alt.securityonion.net/prod/2.4/oracle/9" >> /opt/so/conf/reposync/mirror.txt echo "[main]" > /opt/so/conf/reposync/repodownload.conf echo "gpgcheck=1" >> /opt/so/conf/reposync/repodownload.conf echo "installonly_limit=3" >> /opt/so/conf/reposync/repodownload.conf From 7f1e786e3d1e75297ecb7bda12a63a60d6521904 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 6 Mar 2024 12:56:09 -0500 Subject: [PATCH 195/777] Consolidate PCAP settings --- salt/suricata/defaults.yaml | 16 +++---- salt/suricata/soc_suricata.yaml | 84 ++++++++++++++++----------------- 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 42af3fc55..0252d3a81 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -3,6 +3,14 @@ suricata: pcap: filesize: 1000mb maxsize: 25 + compression: "none" + lz4-checksum: "no" + lz4-level: 8 + filename: "%n/so-pcap.%t" + mode: "multi" + use-stream-depth: "no" + conditional: "all" + dir: "/nsm/suripcap" config: threading: set-cpu-affinity: "no" @@ -131,14 +139,6 @@ suricata: enabled: "no" pcap-log: enabled: "no" - compression: "none" - lz4-checksum: "no" - lz4-level: 8 - filename: "%n/so-pcap.%t" - mode: "multi" - use-stream-depth: "no" - conditional: "all" - dir: "/nsm/suripcap" alert-debug: enabled: "no" alert-prelude: diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 88b460af8..da7586e97 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -27,6 +27,47 @@ suricata: maxsize: description: Size in GB for total usage size of PCAP on disk. helplink: suricata.html + compression: + description: Enable compression of Suricata PCAP. 
Currently unsupported + advanced: True + readonly: True + helpLink: suricata.html + lz4-checksum: + description: Enable PCAP lz4 checksum. Currently unsupported + advanced: True + readonly: True + helpLink: suricata.html + lz4-level: + description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. Currently unsupported + advanced: True + readonly: True + helpLink: suricata.html + filename: + description: Filename output for Suricata PCAP. + advanced: True + readonly: True + helpLink: suricata.html + mode: + description: Suricata PCAP mode. Currently only multi is supported. + advanced: True + readonly: True + helpLink: suricata.html + use-stream-depth: + description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. + advanced: True + regex: ^(yes|no)$ + regexFailureMessage: You must enter either yes or no. + helpLink: suricata.html + conditional: + description: Set to "all" to capture PCAP for all flows. Set to "alert" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. + regex: ^(all|alert|tag)$ + regexFailureMessage: You must enter either all, alert or tag. + helpLink: suricata.html + dir: + description: Parent directory to store PCAP. + advanced: True + readonly: True + helpLink: suricata.html config: af-packet: interface: @@ -175,48 +216,7 @@ suricata: description: This value is ignored by SO. pcapengine in globals takes precidence. readonly: True helpLink: suricata.html - advanced: True - compression: - description: Enable compression of Suricata PCAP. Currently unsupported - advanced: True - readonly: True - helpLink: suricata.html - lz4-checksum: - description: Enable PCAP lz4 checksum. Currently unsupported - advanced: True - readonly: True - helpLink: suricata.html - lz4-level: - description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. 
Currently unsupported - advanced: True - readonly: True - helpLink: suricata.html - filename: - description: Filename output for Suricata PCAP. - advanced: True - readonly: True - helpLink: suricata.html - mode: - description: Suricata PCAP mode. Currently only multi is supported. - advanced: True - readonly: True - helpLink: suricata.html - use-stream-depth: - description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. - advanced: True - regex: ^(yes|no)$ - regexFailureMessage: You must enter either yes or no. - helpLink: suricata.html - conditional: - description: Set to "all" to capture PCAP for all flows. Set to "alert" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. - regex: ^(all|alert|tag)$ - regexFailureMessage: You must enter either all, alert or tag. - helpLink: suricata.html - dir: - description: Parent directory to store PCAP. - advanced: True - readonly: True - helpLink: suricata.html + advanced: True asn1-max-frames: description: Maximum nuber of asn1 frames to decode. helpLink: suricata.html From cf232534ca9a1609da8946adc24dd5f7116cfc24 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 6 Mar 2024 14:42:07 -0500 Subject: [PATCH 196/777] move suricata.pcap to suricata.config.outputs.pcap-log --- salt/suricata/map.jinja | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 5f6e913f5..d8ce271fe 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -63,6 +63,18 @@ {# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #} {% if GLOBALS.pcap_engine == "SURICATA" %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %} +{# move the items in suricata.pcap into suricata.config.outputs.pcap-log. 
these items were placed under suricata.config for ease of access in SOC #} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'compression': SURICATAMERGED.pcap.compression}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'lz4-checksum': SURICATAMERGED.pcap['lz4-checksum']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'lz4-level': SURICATAMERGED.pcap['lz4-level']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'filename': SURICATAMERGED.pcap.filename}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'limit': SURICATAMERGED.pcap.filesize}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'mode': SURICATAMERGED.pcap.mode}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-stream-depth': SURICATAMERGED.pcap['max-stream-depth']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'conditional': SURICATAMERGED.pcap.conditional}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'dir': SURICATAMERGED.pcap.dir}) %} +{% set maxfiles = (SURICATAMERGED.pcap.maxsize / SURICATAMERGED.pcap.filesize) | round | int %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-files': maxfiles}) %} {% endif %} {# outputs is a list but we convert to dict in defaults to work with ui #} From 583227290f1fedc68ffb9abf14b2b92c8677171d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 6 Mar 2024 15:18:22 -0500 Subject: [PATCH 197/777] fix max-files calc --- salt/suricata/map.jinja | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index d8ce271fe..77cdbe7c5 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -70,10 +70,11 @@ {% do SURICATAMERGED.config.outputs['pcap-log'].update({'filename': SURICATAMERGED.pcap.filename}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'limit': SURICATAMERGED.pcap.filesize}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'mode': 
SURICATAMERGED.pcap.mode}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-stream-depth': SURICATAMERGED.pcap['max-stream-depth']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'use-stream-depth': SURICATAMERGED.pcap['use-stream-depth']}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'conditional': SURICATAMERGED.pcap.conditional}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'dir': SURICATAMERGED.pcap.dir}) %} -{% set maxfiles = (SURICATAMERGED.pcap.maxsize / SURICATAMERGED.pcap.filesize) | round | int %} +{# multiply maxsize by 1000 since it is saved in GB, i.e. 52 = 52000MB. filesize is also saved in MB and we strip the MB and convert to int #} +{% set maxfiles = (SURICATAMERGED.pcap.maxsize * 1000 / SURICATAMERGED.pcap.filesize[:-2] | int) | round | int %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-files': maxfiles}) %} {% endif %} From 17a75d5bd251e758706e53380f29d89930eed997 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 6 Mar 2024 17:19:01 -0500 Subject: [PATCH 198/777] Run stig post remediate scan against default ol9 scap-security-guide. 
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/stig/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/stig/enabled.sls b/salt/stig/enabled.sls index 0f347f8bc..c35c91a55 100644 --- a/salt/stig/enabled.sls +++ b/salt/stig/enabled.sls @@ -85,7 +85,7 @@ remove_nullok_from_system_auth_auth: run_post_scan: cmd.run: - - name: 'oscap xccdf eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/post-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}' + - name: 'oscap xccdf eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/post-oscap-report.html /usr/share/xml/scap/ssg/content/ssg-ol9-ds.xml' - success_retcodes: - 2 From 70f3ce0536b4c4aa1c7b24bbea28968ec5fb7c28 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 6 Mar 2024 17:32:06 -0500 Subject: [PATCH 199/777] change how maxfiles is calculated --- salt/suricata/map.jinja | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 77cdbe7c5..6ba3c3b73 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -8,6 +8,24 @@ {% set surimeta_evelog_index = [] %} {% set surimeta_filestore_index = [] %} +{# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #} +{% if GLOBALS.pcap_engine == "SURICATA" %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %} +{# move the items in suricata.pcap into suricata.config.outputs.pcap-log. 
these items were placed under suricata.config for ease of access in SOC #} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'compression': SURICATAMERGED.pcap.compression}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'lz4-checksum': SURICATAMERGED.pcap['lz4-checksum']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'lz4-level': SURICATAMERGED.pcap['lz4-level']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'filename': SURICATAMERGED.pcap.filename}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'limit': SURICATAMERGED.pcap.filesize}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'mode': SURICATAMERGED.pcap.mode}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'use-stream-depth': SURICATAMERGED.pcap['use-stream-depth']}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'conditional': SURICATAMERGED.pcap.conditional}) %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'dir': SURICATAMERGED.pcap.dir}) %} +{# multiply maxsize by 1000 since it is saved in GB, i.e. 52 = 52000MB. 
filesize is also saved in MB and we strip the MB and convert to int #} +{% set maxfiles = (SURICATAMERGED.pcap.maxsize * 1000 / (SURICATAMERGED.pcap.filesize[:-2] | int) / SURICATAMERGED.config['af-packet'].threads | int) | round | int %} +{% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-files': maxfiles}) %} +{% endif %} + {# suricata.config.af-packet has to be rewritten here since we cant display '- interface' in the ui #} {# we are limited to only one iterface #} {% load_yaml as afpacket %} @@ -60,24 +78,6 @@ {% do SURICATAMERGED.config.outputs['file-store'].update({'enabled':suricata_mdengine.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %} {% endif %} -{# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #} -{% if GLOBALS.pcap_engine == "SURICATA" %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %} -{# move the items in suricata.pcap into suricata.config.outputs.pcap-log. these items were placed under suricata.config for ease of access in SOC #} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'compression': SURICATAMERGED.pcap.compression}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'lz4-checksum': SURICATAMERGED.pcap['lz4-checksum']}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'lz4-level': SURICATAMERGED.pcap['lz4-level']}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'filename': SURICATAMERGED.pcap.filename}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'limit': SURICATAMERGED.pcap.filesize}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'mode': SURICATAMERGED.pcap.mode}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'use-stream-depth': SURICATAMERGED.pcap['use-stream-depth']}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'conditional': SURICATAMERGED.pcap.conditional}) %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'dir': 
SURICATAMERGED.pcap.dir}) %} -{# multiply maxsize by 1000 since it is saved in GB, i.e. 52 = 52000MB. filesize is also saved in MB and we strip the MB and convert to int #} -{% set maxfiles = (SURICATAMERGED.pcap.maxsize * 1000 / SURICATAMERGED.pcap.filesize[:-2] | int) | round | int %} -{% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-files': maxfiles}) %} -{% endif %} - {# outputs is a list but we convert to dict in defaults to work with ui #} {# below they are converted back to lists #} {% load_yaml as outputs %} From 005930f7fd70577ced8bec4f25650ec42fb1eccd Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 7 Mar 2024 15:41:23 +0000 Subject: [PATCH 200/777] Add error.message mapping for system.syslog --- .../logs-system.syslog@custom.json | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json new file mode 100644 index 000000000..0123fb956 --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json @@ -0,0 +1,22 @@ +{ + "template": { + "mappings": { + "properties": { + "error": { + "properties": { + "message": { + "type": "match_only_text" + } + } + } + } + } + }, + "_meta": { + "package": { + "name": "system" + }, + "managed_by": "fleet", + "managed": true + } +} From fffef9b621d9afff1d6fcdae17c8a0fab76aadc5 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 7 Mar 2024 12:31:51 -0500 Subject: [PATCH 201/777] gracefully handle status check failure on ubuntu --- salt/common/tools/sbin/so-common-status-check | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/salt/common/tools/sbin/so-common-status-check b/salt/common/tools/sbin/so-common-status-check index d713ba6c6..625e0f199 100644 --- 
a/salt/common/tools/sbin/so-common-status-check +++ b/salt/common/tools/sbin/so-common-status-check @@ -47,10 +47,14 @@ def check_for_fps(): fps = 1 except FileNotFoundError: fn = '/proc/sys/crypto/' + feat_full + '_enabled' - with open(fn, 'r') as f: - contents = f.read() - if '1' in contents: - fps = 1 + try: + with open(fn, 'r') as f: + contents = f.read() + if '1' in contents: + fps = 1 + except: + # Unknown, so assume 0 + fps = 0 with open('/opt/so/log/sostatus/lks_enabled', 'w') as f: f.write(str(fps)) From 40574982e4afbf458a60d77267f1b2fde460fee9 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 7 Mar 2024 14:25:43 -0500 Subject: [PATCH 202/777] unswap files --- salt/common/tools/sbin/so-common-status-check | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-common-status-check b/salt/common/tools/sbin/so-common-status-check index 625e0f199..4aa981123 100644 --- a/salt/common/tools/sbin/so-common-status-check +++ b/salt/common/tools/sbin/so-common-status-check @@ -56,7 +56,7 @@ def check_for_fps(): # Unknown, so assume 0 fps = 0 - with open('/opt/so/log/sostatus/lks_enabled', 'w') as f: + with open('/opt/so/log/sostatus/fps_enabled', 'w') as f: f.write(str(fps)) def check_for_lks(): @@ -80,7 +80,7 @@ def check_for_lks(): lks = 1 if lks: break - with open('/opt/so/log/sostatus/fps_enabled', 'w') as f: + with open('/opt/so/log/sostatus/lks_enabled', 'w') as f: f.write(str(lks)) def fail(msg): From 06257b9c4a0c046ca4d44ca9b1a301f799a92ec8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 7 Mar 2024 14:32:46 -0500 Subject: [PATCH 203/777] Update so-minion --- salt/manager/tools/sbin/so-minion | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index a3d8230b5..cb4e40ade 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -83,7 +83,7 @@ function pcapspace() { if [[ "$OPERATION" == "setup" 
]]; then # Use 25% for PCAP PCAP_PERCENTAGE=1 - DFREEPERCENT=75 + DFREEPERCENT=21 local SPACESIZE=$(df -k /nsm | tail -1 | awk '{print $2}' | tr -d \n) else @@ -564,7 +564,7 @@ function createIDH() { function createHEAVYNODE() { is_pcaplimit=true PCAP_PERCENTAGE=1 - DFREEPERCENT=75 + DFREEPERCENT=21 pcapspace add_elasticsearch_to_minion add_elastic_agent_to_minion From 3eb6fe2df97b76059ec9876f082061773ec4c71b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 8 Mar 2024 09:52:12 -0500 Subject: [PATCH 204/777] allow managersearch to receiver redis and 5644 --- salt/firewall/defaults.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 75df49b25..75a70828e 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1295,6 +1295,10 @@ firewall: portgroups: - redis - beats_5644 + managersearch: + portgroups: + - redis + - beats_5644 self: portgroups: - redis From 4e329359919367869ff8a0d3032e8823386863af Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 8 Mar 2024 16:24:37 +0000 Subject: [PATCH 205/777] Add Strelka config back --- salt/manager/init.sls | 45 +++++++++++++++++++++++++++++++++ salt/strelka/backend/config.sls | 10 ++++++++ 2 files changed, 55 insertions(+) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 51590a6ec..56e72c279 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -103,6 +103,51 @@ rules_dir: - group: socore - makedirs: True +{% if STRELKAMERGED.rules.enabled %} + strelkarepos: + file.managed: + - name: /opt/so/conf/strelka/repos.txt + - source: salt://strelka/rules/repos.txt.jinja + - template: jinja + - defaults: + STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} + - makedirs: True + strelka-yara-update: + {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} + cron.present: + {% else %} + cron.absent: + {% endif %} + - user: socore + - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' + - 
identifier: strelka-yara-update + - hour: '7' + - minute: '1' + strelka-yara-download: + {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} + cron.present: + {% else %} + cron.absent: + {% endif %} + - user: socore + - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' + - identifier: strelka-yara-download + - hour: '7' + - minute: '1' + {% if not GLOBALS.airgap %} + update_yara_rules: + cmd.run: + - name: /usr/sbin/so-yara-update + - onchanges: + - file: yara_update_scripts + download_yara_rules: + cmd.run: + - name: /usr/sbin/so-yara-download + - onchanges: + - file: yara_update_scripts + {% endif %} + {% endif %} + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/strelka/backend/config.sls b/salt/strelka/backend/config.sls index b39e06ac8..db18a68cc 100644 --- a/salt/strelka/backend/config.sls +++ b/salt/strelka/backend/config.sls @@ -50,6 +50,16 @@ backend_taste: - user: 939 - group: 939 +{% if STRELKAMERGED.rules.enabled %} +strelkarules: + file.recurse: + - name: /opt/so/conf/strelka/rules + - source: salt://strelka/rules + - user: 939 + - group: 939 + - clean: True +{% endif %} + {% else %} {{sls}}_state_not_allowed: From fc66a549027070ab75eecd9fda9d7fbb7aee074a Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 8 Mar 2024 16:26:14 +0000 Subject: [PATCH 206/777] Add Strelka download and update scripts back --- .../manager/tools/sbin_jinja/so-yara-download | 51 +++++++++++++++++++ salt/manager/tools/sbin_jinja/so-yara-update | 41 +++++++++++++++ 2 files changed, 92 insertions(+) create mode 100644 salt/manager/tools/sbin_jinja/so-yara-download create mode 100644 salt/manager/tools/sbin_jinja/so-yara-update diff --git a/salt/manager/tools/sbin_jinja/so-yara-download b/salt/manager/tools/sbin_jinja/so-yara-download new file mode 100644 index 000000000..aa9576253 --- /dev/null +++ b/salt/manager/tools/sbin_jinja/so-yara-download @@ -0,0 +1,51 @@ +#!/bin/bash +NOROOT=1 +. 
/usr/sbin/so-common + +{%- set proxy = salt['pillar.get']('manager:proxy') %} +{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %} + +# Download the rules from the internet +{%- if proxy %} +export http_proxy={{ proxy }} +export https_proxy={{ proxy }} +export no_proxy="{{ noproxy }}" +{%- endif %} + +repos="/opt/so/conf/strelka/repos.txt" +output_dir=/nsm/rules/yara +gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com) +clone_dir="/tmp" +if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then + + while IFS= read -r repo; do + if ! $(echo "$repo" | grep -qE '^#'); then + # Remove old repo if existing bc of previous error condition or unexpected disruption + repo_name=`echo $repo | awk -F '/' '{print $NF}'` + [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name + + # Clone repo and make appropriate directories for rules + git clone $repo $clone_dir/$repo_name + echo "Analyzing rules from $clone_dir/$repo_name..." + mkdir -p $output_dir/$repo_name + # Ensure a copy of the license is available for the rules + [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name + + # Copy over rules + for i in $(find $clone_dir/$repo_name -name "*.yar*"); do + rule_name=$(echo $i | awk -F '/' '{print $NF}') + cp $i $output_dir/$repo_name + done + rm -rf $clone_dir/$repo_name + fi + done < $repos + + echo "Done!" + +/usr/sbin/so-yara-update + +else + echo "Server returned $gh_status status code." + echo "No connectivity to Github...exiting..." + exit 1 +fi diff --git a/salt/manager/tools/sbin_jinja/so-yara-update b/salt/manager/tools/sbin_jinja/so-yara-update new file mode 100644 index 000000000..07c940f47 --- /dev/null +++ b/salt/manager/tools/sbin_jinja/so-yara-update @@ -0,0 +1,41 @@ +#!/bin/bash +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +NOROOT=1 +. /usr/sbin/so-common + +echo "Starting to check for yara rule updates at $(date)..." + +newcounter=0 +excludedcounter=0 +excluded_rules=({{ EXCLUDEDRULES | join(' ') }}) + +# Pull down the SO Rules +SORULEDIR=/nsm/rules/yara +OUTPUTDIR=/opt/so/saltstack/local/salt/strelka/rules + +mkdir -p $OUTPUTDIR +# remove all rules prior to copy so we can clear out old rules +rm -f $OUTPUTDIR/* + +for i in $(find $SORULEDIR -name "*.yar" -o -name "*.yara"); do + rule_name=$(echo $i | awk -F '/' '{print $NF}') + if [[ ! "${excluded_rules[*]}" =~ ${rule_name} ]]; then + echo "Adding rule: $rule_name..." + cp $i $OUTPUTDIR/$rule_name + ((newcounter++)) + else + echo "Excluding rule: $rule_name..." + ((excludedcounter++)) + fi +done + +if [ "$newcounter" -gt 0 ] || [ "$excludedcounter" -gt 0 ];then + echo "$newcounter rules added." + echo "$excludedcounter rule(s) excluded." +fi + +echo "Finished rule updates at $(date)..." 
From e8ae60901233b06742a915cb64940e8564d75f37 Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 8 Mar 2024 16:27:17 +0000 Subject: [PATCH 207/777] Add Strelka rules watch back --- salt/strelka/backend/enabled.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 9ebb1a148..fc56f4197 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - #- watch: - # - file: strelkarules + - watch: + - file: strelkarules delete_so-strelka-backend_so-status.disabled: file.uncomment: From 6680e023e4f9dcec593d78967c9f6a49eba63856 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 8 Mar 2024 12:16:59 -0500 Subject: [PATCH 208/777] Update soc_pcap.yaml --- salt/pcap/soc_pcap.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/pcap/soc_pcap.yaml b/salt/pcap/soc_pcap.yaml index 32204a23a..96bc3831d 100644 --- a/salt/pcap/soc_pcap.yaml +++ b/salt/pcap/soc_pcap.yaml @@ -4,32 +4,32 @@ pcap: helpLink: stenographer.html config: maxdirectoryfiles: - description: The maximum number of packet/index files to create before deleting old files. + description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting. helpLink: stenographer.html diskfreepercentage: - description: The disk space percent to always keep free for PCAP + description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. 
If you have a distributed deployment with dedicated forward nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and :ref:`elasticsearch` at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21. helpLink: stenographer.html blocks: - description: The number of 1MB packet blocks used by AF_PACKET to store packets in memory, per thread. You shouldn't need to change this. + description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this. advanced: True helpLink: stenographer.html preallocate_file_mb: - description: File size to pre-allocate for individual PCAP files. You shouldn't need to change this. + description: File size to pre-allocate for individual Stenographer PCAP files. You shouldn't need to change this. advanced: True helpLink: stenographer.html aiops: - description: The max number of async writes to allow at once. + description: The max number of async writes to allow for Stenographer at once. advanced: True helpLink: stenographer.html pin_to_cpu: - description: Enable CPU pinning for PCAP. + description: Enable CPU pinning for Stenographer PCAP. advanced: True helpLink: stenographer.html cpus_to_pin_to: - description: CPU to pin PCAP to. Currently only a single CPU is supported. + description: CPU to pin Stenographer PCAP to. Currently only a single CPU is supported. advanced: True helpLink: stenographer.html disks: - description: List of disks to use for PCAP. This is currently not used. + description: List of disks to use for Stenographer PCAP. This is currently not used. 
advanced: True helpLink: stenographer.html From 6f05c3976b45337fae4699c953345a80179b398b Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 8 Mar 2024 11:29:46 -0700 Subject: [PATCH 209/777] Updated RulesRepo for New Strelka Structure --- salt/soc/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 5699c7722..7be2db772 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1132,7 +1132,8 @@ soc: compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py reposFolder: /opt/sensoroni/yara/repos rulesRepos: - - https://github.com/Security-Onion-Solutions/securityonion-yara + - repo: https://github.com/Security-Onion-Solutions/securityonion-yara + license: DRL yaraRulesFolder: /opt/sensoroni/yara/rules suricataengine: communityRulesFile: /nsm/rules/suricata/emerging-all.rules From a55e04e64a0fe67ec20b2813df0aec7fa679ad1b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 8 Mar 2024 15:48:53 -0500 Subject: [PATCH 210/777] pcap improvements --- salt/pcap/config.sls | 7 ------- salt/pcap/init.sls | 9 +++++++++ salt/sensoroni/enabled.sls | 3 --- salt/sensoroni/files/sensoroni.json | 12 +----------- salt/suricata/enabled.sls | 5 ----- salt/suricata/init.sls | 1 + salt/suricata/pcap.sls | 10 ++++++++-- 7 files changed, 19 insertions(+), 28 deletions(-) diff --git a/salt/pcap/config.sls b/salt/pcap/config.sls index 9ea5cee65..eb37765c5 100644 --- a/salt/pcap/config.sls +++ b/salt/pcap/config.sls @@ -72,13 +72,6 @@ stenoca: - user: 941 - group: 939 -pcapdir: - file.directory: - - name: /nsm/pcap - - user: 941 - - group: 941 - - makedirs: True - pcaptmpdir: file.directory: - name: /nsm/pcaptmp diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index 9de272ad7..7a172e8fd 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -15,3 +15,12 @@ include: {% else %} - pcap.disabled {% endif %} + +# This directory needs to exist regardless of whether STENO is enabled or 
not, in order for +# Sensoroni to be able to look at old steno PCAP data +pcapdir: + file.directory: + - name: /nsm/pcap + - user: 941 + - group: 941 + - makedirs: True \ No newline at end of file diff --git a/salt/sensoroni/enabled.sls b/salt/sensoroni/enabled.sls index 6dc3df2bd..3f05568a0 100644 --- a/salt/sensoroni/enabled.sls +++ b/salt/sensoroni/enabled.sls @@ -23,10 +23,7 @@ so-sensoroni: - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro - /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw - {% if GLOBALS.pcap_engine == "SURICATA" %} - /nsm/suripcap/:/nsm/suripcap:rw - - /nsm/suripcaptmp:/nsm/suripcaptmp:rw - {% endif %} {% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-sensoroni'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index eb9c1131d..f40f73167 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -23,29 +23,19 @@ "statickeyauth": { "apiKey": "{{ GLOBALS.sensoroni_key }}" {% if GLOBALS.is_sensor %} -{% from 'pcap/config.map.jinja' import PCAPMERGED %} -{% from 'suricata/map.jinja' import SURICATAMERGED %} -{# if PCAPMERGED.enabled is true then we know that steno is the pcap engine #} -{# if it is false, then user has steno disabled in ui or has selected suricata for pcap engine #} -{%- if PCAPMERGED.enabled %} }, "stenoquery": { "executablePath": "/opt/sensoroni/scripts/stenoquery.sh", "pcapInputPath": "/nsm/pcap", "pcapOutputPath": "/nsm/pcapout" - } -{%- elif GLOBALS.pcap_engine == "SURICATA" and SURICATAMERGED.enabled %} }, "suriquery": { "executablePath": "/opt/sensoroni/scripts/suriquery.sh", "pcapInputPath": "/nsm/suripcap", "pcapOutputPath": "/nsm/pcapout", "pcapMaxCount": {{ SENSORONIMERGED.config.suripcap.pcapMaxCount }} - } -{% endif %} -{%- else %} - } {%- endif %} + } } } } diff --git 
a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index fed5783e1..d35160527 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -12,9 +12,6 @@ include: - suricata.config - suricata.sostatus -{% if GLOBALS.pcap_engine == "SURICATA" %} - - suricata.pcap -{% endif %} so-suricata: docker_container.running: @@ -36,9 +33,7 @@ so-suricata: - /nsm/suricata/:/nsm/:rw - /nsm/suricata/extracted:/var/log/suricata//filestore:rw - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro - {% if GLOBALS.pcap_engine == "SURICATA" %} - /nsm/suripcap/:/nsm/suripcap:rw - {% endif %} {% if DOCKER.containers['so-suricata'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-suricata'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 64a000109..f6ddce862 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -7,6 +7,7 @@ {% from 'suricata/map.jinja' import SURICATAMERGED %} include: + - suricata.pcap {% if SURICATAMERGED.enabled and GLOBALS.role != 'so-import' %} - suricata.enabled {% elif GLOBALS.role == 'so-import' %} diff --git a/salt/suricata/pcap.sls b/salt/suricata/pcap.sls index a3cbafa0a..665262477 100644 --- a/salt/suricata/pcap.sls +++ b/salt/suricata/pcap.sls @@ -1,14 +1,18 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'suricata/map.jinja' import SURICATAMERGED %} +# This directory needs to exist regardless of whether SURIPCAP is enabled or not, in order for +# Sensoroni to be able to look at old Suricata PCAP data suripcapdir: file.directory: - name: /nsm/suripcap - user: 940 - group: 939 - - mode: 755 + - mode: 775 - makedirs: True +{% if GLOBALS.pcap_engine == "SURICATA" %} + {# there should only be 1 interface in af-packet so we can just reference the first list item #} {% for i in range(1, SURICATAMERGED.config['af-packet'][0].threads + 1) %} @@ -17,6 +21,8 @@ suripcapthread{{i}}dir: - name: /nsm/suripcap/{{i}} - user: 940 - group: 939 - - mode: 755 + - mode: 775 
{% endfor %} + +{% endif %} From a892352b612627c05ae83da48eb0bbc383c27d9c Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 8 Mar 2024 16:43:29 -0500 Subject: [PATCH 211/777] Update soc_pcap.yaml --- salt/pcap/soc_pcap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pcap/soc_pcap.yaml b/salt/pcap/soc_pcap.yaml index 96bc3831d..65fb99d86 100644 --- a/salt/pcap/soc_pcap.yaml +++ b/salt/pcap/soc_pcap.yaml @@ -7,7 +7,7 @@ pcap: description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting. helpLink: stenographer.html diskfreepercentage: - description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated forward nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and :ref:`elasticsearch` at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21. + description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated forward nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. 
However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21. helpLink: stenographer.html blocks: description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this. From 34d5954e169972e21e412fb236f8bfc80cca788e Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 11 Mar 2024 09:12:05 -0400 Subject: [PATCH 212/777] Fix indent --- salt/manager/init.sls | 86 +++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index ee564dce8..c62a41999 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -118,49 +118,49 @@ rules_dir: - makedirs: True {% if STRELKAMERGED.rules.enabled %} - strelkarepos: - file.managed: - - name: /opt/so/conf/strelka/repos.txt - - source: salt://strelka/rules/repos.txt.jinja - - template: jinja - - defaults: - STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} - - makedirs: True - strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' - - identifier: strelka-yara-update - - hour: '7' - - minute: '1' - strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' - - identifier: strelka-yara-download - - hour: '7' - - minute: '1' - {% if not GLOBALS.airgap %} - update_yara_rules: 
- cmd.run: - - name: /usr/sbin/so-yara-update - - onchanges: - - file: yara_update_scripts - download_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-download - - onchanges: - - file: yara_update_scripts - {% endif %} - {% endif %} +strelkarepos: + file.managed: + - name: /opt/so/conf/strelka/repos.txt + - source: salt://strelka/rules/repos.txt.jinja + - template: jinja + - defaults: + STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} + - makedirs: True +strelka-yara-update: + {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} + cron.present: + {% else %} + cron.absent: + {% endif %} + - user: socore + - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' + - identifier: strelka-yara-update + - hour: '7' + - minute: '1' +strelka-yara-download: + {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} + cron.present: + {% else %} + cron.absent: + {% endif %} + - user: socore + - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' + - identifier: strelka-yara-download + - hour: '7' + - minute: '1' +{% if not GLOBALS.airgap %} +update_yara_rules: + cmd.run: + - name: /usr/sbin/so-yara-update + - onchanges: + - file: yara_update_scripts +download_yara_rules: + cmd.run: + - name: /usr/sbin/so-yara-download + - onchanges: + - file: yara_update_scripts +{% endif %} +{% endif %} {% else %} From a8403c63c73d37ad97ee0e5565fe8c3109c4019c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 11 Mar 2024 09:35:54 -0400 Subject: [PATCH 213/777] Create local salt dir for stig Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 8b5d19751..028931012 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -537,6 +537,8 @@ up_to_2.4.40() { up_to_2.4.50() { echo "Creating additional 
pillars.." mkdir -p /opt/so/saltstack/local/pillar/stig/ + mkdir -p /opt/so/saltstack/local/salt/stig/ + chown socore:socore /opt/so/saltstack/local/salt/stig/ touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls From 907cf9f9924d25c3e309a32900c9a522ffa8a212 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 11 Mar 2024 12:20:28 -0400 Subject: [PATCH 214/777] transition pcap --- salt/bpf/pcap.map.jinja | 17 ++++++++++------- salt/global/soc_global.yaml | 6 +++--- salt/suricata/map.jinja | 2 +- salt/suricata/pcap.sls | 2 +- salt/telegraf/scripts/oldpcap.sh | 2 +- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/salt/bpf/pcap.map.jinja b/salt/bpf/pcap.map.jinja index c1d7562cc..a6deae4f4 100644 --- a/salt/bpf/pcap.map.jinja +++ b/salt/bpf/pcap.map.jinja @@ -1,7 +1,10 @@ -{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} -{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} -{% import 'bpf/macros.jinja' as MACROS %} - -{{ MACROS.remove_comments(BPFMERGED, 'pcap') }} - -{% set PCAPBPF = BPFMERGED.pcap %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% if GLOBALS.pcap_engine == "TRANSITION" %} +{% set PCAPBPF = "ip and host 255.255.255.1 and port 1" %} +{% else %} +{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} +{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} +{% import 'bpf/macros.jinja' as MACROS %} +{{ MACROS.remove_comments(BPFMERGED, 'pcap') }} +{% set PCAPBPF = BPFMERGED.pcap %} +{% endif %} diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index d707fb1cc..a48476214 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -15,9 +15,9 @@ global: regexFailureMessage: You must enter either ZEEK or SURICATA. global: True pcapengine: - description: Which engine to use for generating pcap. Options are STENO and SURICATA. 
- regex: ^(STENO|SURICATA)$ - regexFailureMessage: You must enter either STENO or SURICATA. + description: Which engine to use for generating pcap. Options are STENO, SURICATA or TRANSITION. + regex: ^(STENO|SURICATA|TRANSITION)$ + regexFailureMessage: You must enter either STENO, SURICATA or TRANSITION. global: True ids: description: Which IDS engine to use. Currently only Suricata is supported. diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 6ba3c3b73..7f7b04aef 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -9,7 +9,7 @@ {% set surimeta_filestore_index = [] %} {# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #} -{% if GLOBALS.pcap_engine == "SURICATA" %} +{% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %} {# move the items in suricata.pcap into suricata.config.outputs.pcap-log. these items were placed under suricata.config for ease of access in SOC #} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'compression': SURICATAMERGED.pcap.compression}) %} diff --git a/salt/suricata/pcap.sls b/salt/suricata/pcap.sls index 665262477..87b568f96 100644 --- a/salt/suricata/pcap.sls +++ b/salt/suricata/pcap.sls @@ -11,7 +11,7 @@ suripcapdir: - mode: 775 - makedirs: True -{% if GLOBALS.pcap_engine == "SURICATA" %} +{% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %} {# there should only be 1 interface in af-packet so we can just reference the first list item #} {% for i in range(1, SURICATAMERGED.config['af-packet'][0].threads + 1) %} diff --git a/salt/telegraf/scripts/oldpcap.sh b/salt/telegraf/scripts/oldpcap.sh index 438ce912c..876ff7835 100644 --- a/salt/telegraf/scripts/oldpcap.sh +++ b/salt/telegraf/scripts/oldpcap.sh @@ -5,7 +5,7 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
-{%- if GLOBALS.pcap_engine == "SURICATA" %} +{%- if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %} PCAPLOC=/host/nsm/suripcap {%- else %} PCAPLOC=/host/nsm/pcap From b5d8df7fb2ab72420d4fd35efe71cd9f3ead586b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 11 Mar 2024 13:45:57 -0400 Subject: [PATCH 215/777] auto-convert email addresses to lowercase during setup --- setup/so-whiptail | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index ede138d26..5e2a2de0e 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -195,10 +195,12 @@ whiptail_create_web_user() { [ -n "$TESTING" ] && return WEBUSER=$(whiptail --title "$whiptail_title" --inputbox \ - "Please enter an email address to create an administrator account for the Security Onion Console (SOC) web interface.\n\nThis will also be used for Elasticsearch and Kibana." 12 60 "$1" 3>&1 1>&2 2>&3) + "Please enter an email address to create an administrator account for the Security Onion Console (SOC) web interface.\n\nThis will also be used for Elasticsearch and Kibana.\n\nMust only include letters, numbers, or + - _ % . @ characters. All capitalized letters will be converted to lowercase." 12 60 "$1" 3>&1 1>&2 2>&3) local exitstatus=$? 
whiptail_check_exitstatus $exitstatus + + WEBUSER=${WEBUSER,,} } whiptail_create_web_user_password1() { From cd28c00d67dd658bf247e8d3c1401f35c67f7e80 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 11 Mar 2024 13:47:31 -0400 Subject: [PATCH 216/777] auto-convert email addresses to lowercase during setup --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 5e2a2de0e..ff8c9fe8d 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -195,7 +195,7 @@ whiptail_create_web_user() { [ -n "$TESTING" ] && return WEBUSER=$(whiptail --title "$whiptail_title" --inputbox \ - "Please enter an email address to create an administrator account for the Security Onion Console (SOC) web interface.\n\nThis will also be used for Elasticsearch and Kibana.\n\nMust only include letters, numbers, or + - _ % . @ characters. All capitalized letters will be converted to lowercase." 12 60 "$1" 3>&1 1>&2 2>&3) + "Please enter an email address to create an administrator account for the Security Onion Console (SOC) web interface.\n\nThis will also be used for Elasticsearch and Kibana.\n\nMust only include letters, numbers, or + - _ % . @ characters. All capitalized letters will be converted to lowercase." 15 60 "$1" 3>&1 1>&2 2>&3) local exitstatus=$? 
whiptail_check_exitstatus $exitstatus From ba32b3e6e9d23a7c34fadef272f5bf8ec2e52ae3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 11 Mar 2024 14:07:45 -0400 Subject: [PATCH 217/777] fix bpf for transition --- salt/bpf/pcap.map.jinja | 2 +- salt/soc/defaults.yaml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/bpf/pcap.map.jinja b/salt/bpf/pcap.map.jinja index a6deae4f4..4d8fef460 100644 --- a/salt/bpf/pcap.map.jinja +++ b/salt/bpf/pcap.map.jinja @@ -1,6 +1,6 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% if GLOBALS.pcap_engine == "TRANSITION" %} -{% set PCAPBPF = "ip and host 255.255.255.1 and port 1" %} +{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %} {% else %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 5699c7722..7be2db772 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1132,7 +1132,8 @@ soc: compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py reposFolder: /opt/sensoroni/yara/repos rulesRepos: - - https://github.com/Security-Onion-Solutions/securityonion-yara + - repo: https://github.com/Security-Onion-Solutions/securityonion-yara + license: DRL yaraRulesFolder: /opt/sensoroni/yara/rules suricataengine: communityRulesFile: /nsm/rules/suricata/emerging-all.rules From 61a183b7fc567ebdce5f9252a6af87d0330fda19 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 11 Mar 2024 15:55:39 -0400 Subject: [PATCH 218/777] Add regex defaults --- salt/soc/defaults.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 7be2db772..197aee070 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1078,8 +1078,10 @@ soc: kratos: hostUrl: elastalertengine: + allowRegex: '' autoUpdateEnabled: false - communityRulesImportFrequencySeconds: 180 + 
communityRulesImportFrequencySeconds: 86400 + denyRegex: '.*' elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint sigmaRulePackages: @@ -1128,15 +1130,19 @@ soc: userFiles: - rbac/users_roles strelkaengine: + allowRegex: '' autoUpdateEnabled: false compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py + denyRegex: '.*' reposFolder: /opt/sensoroni/yara/repos rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-yara license: DRL yaraRulesFolder: /opt/sensoroni/yara/rules suricataengine: + allowRegex: '' communityRulesFile: /nsm/rules/suricata/emerging-all.rules + denyRegex: '.*' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint client: enableReverseLookup: false From 72acb11925bd85afcbd1fc59f167ff96520fb253 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 11 Mar 2024 19:04:51 -0400 Subject: [PATCH 219/777] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index da7586e97..c61c04123 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -59,8 +59,8 @@ suricata: regexFailureMessage: You must enter either yes or no. helpLink: suricata.html conditional: - description: Set to "all" to capture PCAP for all flows. Set to "alert" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. - regex: ^(all|alert|tag)$ + description: Set to "all" to capture PCAP for all flows. Set to "alerts" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. + regex: ^(all|alerts|tag)$ regexFailureMessage: You must enter either all, alert or tag. 
helpLink: suricata.html dir: From 3e0fb3f8bb953f32dde333b25ad35266689e6fbc Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 12 Mar 2024 10:18:27 -0400 Subject: [PATCH 220/777] Update so-saltstack-update --- salt/manager/tools/sbin/so-saltstack-update | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-saltstack-update b/salt/manager/tools/sbin/so-saltstack-update index b15fce008..4be8f095c 100755 --- a/salt/manager/tools/sbin/so-saltstack-update +++ b/salt/manager/tools/sbin/so-saltstack-update @@ -47,7 +47,7 @@ got_root(){ got_root if [ $# -ne 1 ] ; then - BRANCH=master + BRANCH=2.4/main else BRANCH=$1 fi From 06013e2c6fd75d04cab6fda7ccc10c455e2c36f5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 13 Mar 2024 07:23:43 -0400 Subject: [PATCH 221/777] Gen packages post-SOUP --- salt/manager/tools/sbin/soup | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 028931012..a585f877c 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -430,7 +430,8 @@ post_to_2.4.50() { } post_to_2.4.60() { - echo "Nothing to apply" + echo "Regenerating Elastic Agent Installers..." 
+ so-elastic-agent-gen-installers POSTVERSION=2.4.60 } From 1a829190ac4ba8777ac68bc865bfc6944be55d92 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 13 Mar 2024 09:46:44 -0400 Subject: [PATCH 222/777] remove modules if detections disabled --- salt/soc/defaults.yaml | 2 +- salt/soc/merged.map.jinja | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 197aee070..de372a98f 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1081,7 +1081,7 @@ soc: allowRegex: '' autoUpdateEnabled: false communityRulesImportFrequencySeconds: 86400 - denyRegex: '.*' + denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint sigmaRulePackages: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index bc7c5cada..2012917af 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -30,6 +30,13 @@ {# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #} {% do SOCMERGED.config.server.modules.pop('cases') %} +{# remove these modules if detections is disabled #} +{% if not SOCMERGED.config.server.client.detectionsEnabled %} +{% do SOCMERGED.config.server.modules.pop('elastalertengine') %} +{% do SOCMERGED.config.server.modules.pop('strelkaengine') %} +{% do SOCMERGED.config.server.modules.pop('suricataengine') %} +{% endif %} + {% if pillar.manager.playbook == 0 %} {% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %} {% endif %} From b9702d02db13c83765894067c9b07732ee41eff7 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 13 Mar 2024 11:24:26 -0400 Subject: [PATCH 223/777] Update init.sls --- salt/salt/init.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/salt/init.sls b/salt/salt/init.sls index a190a84eb..b2ea31a65 100644 --- a/salt/salt/init.sls +++ b/salt/salt/init.sls @@ -10,3 +10,4 @@ 
salt_bootstrap: - name: /usr/sbin/bootstrap-salt.sh - source: salt://salt/scripts/bootstrap-salt.sh - mode: 755 + - show_changes: False From 275a678fa1a90369d2d64ff2260679975384f548 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 13 Mar 2024 13:49:44 -0400 Subject: [PATCH 224/777] removed unused property --- salt/sensoroni/files/sensoroni.json | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index f40f73167..547e52ada 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -30,7 +30,6 @@ "pcapOutputPath": "/nsm/pcapout" }, "suriquery": { - "executablePath": "/opt/sensoroni/scripts/suriquery.sh", "pcapInputPath": "/nsm/suripcap", "pcapOutputPath": "/nsm/pcapout", "pcapMaxCount": {{ SENSORONIMERGED.config.suripcap.pcapMaxCount }} From 927fe9039d1dca052e96cbfcdd3db380fe49b672 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 13 Mar 2024 20:50:03 -0400 Subject: [PATCH 225/777] handle airgap when detections not enabled --- salt/soc/merged.map.jinja | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 2012917af..57abe7a48 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -35,18 +35,18 @@ {% do SOCMERGED.config.server.modules.pop('elastalertengine') %} {% do SOCMERGED.config.server.modules.pop('strelkaengine') %} {% do SOCMERGED.config.server.modules.pop('suricataengine') %} +{% elif pillar.global.airgap %} + {# if system is Airgap, don't autoupdate Yara & Sigma rules #} + {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoUpdateEnabled': false}) %} + {% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %} +{% endif %} + {% endif %} {% if pillar.manager.playbook == 0 %} {% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %} {% endif %} -{# if system is Airgap, don't 
autoupdate Yara & Sigma rules #} -{% if pillar.global.airgap %} - {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoUpdateEnabled': false}) %} - {% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %} -{% endif %} - {% set standard_actions = SOCMERGED.config.pop('actions') %} {% if pillar.global.endgamehost != '' %} From 844cfe55cd0ea40317a51f9cee33a801d690d647 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 13 Mar 2024 20:52:17 -0400 Subject: [PATCH 226/777] handle airgap when detections not enabled --- salt/soc/merged.map.jinja | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 57abe7a48..c22ed2210 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -41,8 +41,6 @@ {% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %} {% endif %} -{% endif %} - {% if pillar.manager.playbook == 0 %} {% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %} {% endif %} From 284e0d84354a0d8b769a4582d0c68e52b749e669 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 14 Mar 2024 11:33:47 -0400 Subject: [PATCH 227/777] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index c61c04123..47e9e1503 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -28,19 +28,16 @@ suricata: description: Size in GB for total usage size of PCAP on disk. helplink: suricata.html compression: - description: Enable compression of Suricata PCAP. Currently unsupported + description: Enable compression of Suricata PCAP. advanced: True - readonly: True helpLink: suricata.html lz4-checksum: - description: Enable PCAP lz4 checksum. Currently unsupported + description: Enable PCAP lz4 checksum. 
advanced: True - readonly: True helpLink: suricata.html lz4-level: description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. Currently unsupported advanced: True - readonly: True helpLink: suricata.html filename: description: Filename output for Suricata PCAP. From fd835f63947b152d3390c3b6f866c9f0e7c338a2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 14 Mar 2024 11:36:45 -0400 Subject: [PATCH 228/777] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 47e9e1503..7decaa6d3 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -36,7 +36,7 @@ suricata: advanced: True helpLink: suricata.html lz4-level: - description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. Currently unsupported + description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. 
advanced: True helpLink: suricata.html filename: From af5b3feb96e657dc256508b81166f25576894d45 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 18 Mar 2024 07:34:18 -0400 Subject: [PATCH 229/777] re-schedule lock jobs --- .github/workflows/close-threads.yml | 32 +++++++++++++++++++++++++++++ .github/workflows/lock-threads.yml | 19 +---------------- 2 files changed, 33 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/close-threads.yml diff --git a/.github/workflows/close-threads.yml b/.github/workflows/close-threads.yml new file mode 100644 index 000000000..059a35a9f --- /dev/null +++ b/.github/workflows/close-threads.yml @@ -0,0 +1,32 @@ +name: 'Close Threads' + +on: + schedule: + - cron: '50 1 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + discussions: write + +concurrency: + group: lock-threads + +jobs: + close-threads: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v5 + with: + days-before-issue-stale: -1 + days-before-issue-close: 60 + stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution." + close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available." + days-before-pr-stale: 45 + days-before-pr-close: 60 + stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes." + close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. 
If there is still a commitment to finishing this PR re-open it before it is locked." diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml index 25e5d8c17..eeaa444ed 100644 --- a/.github/workflows/lock-threads.yml +++ b/.github/workflows/lock-threads.yml @@ -2,7 +2,7 @@ name: 'Lock Threads' on: schedule: - - cron: '50 1 * * *' + - cron: '50 2 * * *' workflow_dispatch: permissions: @@ -14,23 +14,6 @@ concurrency: group: lock-threads jobs: - close-threads: - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - - uses: actions/stale@v5 - with: - days-before-issue-stale: -1 - days-before-issue-close: 60 - stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution." - close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available." - days-before-pr-stale: 45 - days-before-pr-close: 60 - stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes." - close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked." 
- lock-threads: runs-on: ubuntu-latest steps: From c6df805556ca56e3d6ca252f01ea5dc6cb274e40 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 18 Mar 2024 14:53:36 +0000 Subject: [PATCH 230/777] Add SOC template --- salt/elasticsearch/defaults.yaml | 107 +++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 2274018b1..c70b0419a 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -227,6 +227,113 @@ elasticsearch: sort: field: '@timestamp' order: desc + so-logs-soc: + close: 30 + delete: 365 + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - container-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - common-settings + - common-dynamic-mappings + data_stream: {} + index_patterns: + - logs-soc-so* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + 
match_mapping_type: string + settings: + index: + lifecycle: + name: so-soc-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + warm: 7 so-common: close: 30 delete: 365 From 020eb47026d57136a2d12d80dfa2e32629e48008 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 19 Mar 2024 13:53:37 -0400 Subject: [PATCH 231/777] Change Detections defaults --- salt/soc/defaults.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index de372a98f..6c8234b9a 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1080,7 +1080,7 @@ soc: elastalertengine: allowRegex: '' autoUpdateEnabled: false - communityRulesImportFrequencySeconds: 86400 + communityRulesImportFrequencySeconds: 180 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint @@ -1132,8 +1132,9 @@ soc: strelkaengine: allowRegex: '' autoUpdateEnabled: false + communityRulesImportFrequencySeconds: 180 compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py - denyRegex: '.*' + denyRegex: '' reposFolder: /opt/sensoroni/yara/repos rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-yara @@ -1141,8 +1142,10 @@ soc: yaraRulesFolder: /opt/sensoroni/yara/rules suricataengine: allowRegex: '' + autoUpdateEnabled: false + communityRulesImportFrequencySeconds: 180 communityRulesFile: /nsm/rules/suricata/emerging-all.rules - denyRegex: '.*' + denyRegex: '' rulesFingerprintFile: 
/opt/sensoroni/fingerprints/emerging-all.fingerprint client: enableReverseLookup: false From d84af803a66c99449c4900fa243768d586c4753f Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 20 Mar 2024 08:48:31 -0400 Subject: [PATCH 232/777] Enable Autoupdates --- salt/soc/defaults.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 6c8234b9a..8defda0dd 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1079,7 +1079,7 @@ soc: hostUrl: elastalertengine: allowRegex: '' - autoUpdateEnabled: false + autoUpdateEnabled: true communityRulesImportFrequencySeconds: 180 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert @@ -1131,7 +1131,7 @@ soc: - rbac/users_roles strelkaengine: allowRegex: '' - autoUpdateEnabled: false + autoUpdateEnabled: true communityRulesImportFrequencySeconds: 180 compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py denyRegex: '' @@ -1142,7 +1142,7 @@ soc: yaraRulesFolder: /opt/sensoroni/yara/rules suricataengine: allowRegex: '' - autoUpdateEnabled: false + autoUpdateEnabled: true communityRulesImportFrequencySeconds: 180 communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' From bb3bbd749c65ee45489b847a587e6be25a419d8a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 20 Mar 2024 10:20:04 -0400 Subject: [PATCH 233/777] 2.4.260 --- DOWNLOAD_AND_VERIFY_ISO.md | 22 ++++++++++----------- sigs/securityonion-2.4.60-20240320.iso.sig | Bin 0 -> 566 bytes 2 files changed, 11 insertions(+), 11 deletions(-) create mode 100644 sigs/securityonion-2.4.60-20240320.iso.sig diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index a23d88d4d..4493f210d 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,17 +1,17 @@ -### 2.4.50-20240220 ISO image released on 2024/02/20 +### 2.4.60-20240320 ISO image released on 2024/03/20 ### Download and Verify -2.4.50-20240220 ISO image: 
-https://download.securityonion.net/file/securityonion/securityonion-2.4.50-20240220.iso +2.4.60-20240320 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso -MD5: BCA6476EF1BF79773D8EFB11700FDE8E -SHA1: 9FF0A304AA368BCD2EF2BE89AD47E65650241927 -SHA256: 49D7695EFFF6F3C4840079BF564F3191B585639816ADE98672A38017F25E9570 +MD5: 178DD42D06B2F32F3870E0C27219821E +SHA1: 73EDCD50817A7F6003FE405CF1808A30D034F89D +SHA256: DD334B8D7088A7B78160C253B680D645E25984BA5CCAB5CC5C327CA72137FC06 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.50-20240220.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.50-20240220.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.50-20240220.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.50-20240220.iso.sig securityonion-2.4.50-20240220.iso +gpg --verify securityonion-2.4.60-20240320.iso.sig securityonion-2.4.60-20240320.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Fri 16 Feb 2024 11:36:25 AM EST using RSA key ID FE507013 +gpg: Signature made Tue 19 Mar 2024 03:17:58 PM EDT using RSA key ID FE507013 
gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. diff --git a/sigs/securityonion-2.4.60-20240320.iso.sig b/sigs/securityonion-2.4.60-20240320.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..c0129ab64b049b783258081c8d3dde117b29aa95 GIT binary patch literal 566 zcmV-60?GY}0y6{v0SEvc79j-41gSkXz6^6dp_W8^5Ma0dP;e6k0%iH+<^T!_5PT3| zxBgIY6FsdE0HMAsL4Ya2;dUH@Tj-=rYUOi%4z*NE^@q+`;Erl=A8#u|b^A0~xO#6`Szzg5~-3VjT3Yys$sGkuxpcsZ@^e5$v7pXryod-7`cIChZTfeuV& zKIlAHEhH+#f`k%z&qebjUlS~w3XH&a0N!TFO3A(4TzcH@Bj#jfvyTbV)Eyc8J;5eF zL9BEEpso{FY9)Vhc3*2LAuZ&Xo~l7+Jb{y4Qr$U&^WQTRe+g57T7=d8`o!9)8dp2s zgkP&e^vc+xnu!=S9`UpxO0R#HROR;d$ZLxxt`ZVR0Ne_ywWWs$w3}i184t&>AnQ#OU-=HH6mAF^^|dFa z>sH%7KeQ%8Gt+!Vx#jt6YUO&Y^g8?yMOnxK9R2w4q_eENgMup&e%TRy7{8${`LFyP$=x?8XlQVruAYhoUiD`V~ehpF literal 0 HcmV?d00001 From d4d17e1835b32060c71903dde5dd5446424efaa7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 20 Mar 2024 11:04:40 -0400 Subject: [PATCH 234/777] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 5a99ed019..b3c5d8c27 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.60 +2.4.70 From 876690a9f61269345b0c81a12f4717b857c0ab75 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 20 Mar 2024 15:49:46 -0400 Subject: [PATCH 235/777] FIX: Annotations for BPF and Suricata PCAP #12626 --- salt/bpf/soc_bpf.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/bpf/soc_bpf.yaml b/salt/bpf/soc_bpf.yaml index 379eaa022..d93ec98fd 100644 --- a/salt/bpf/soc_bpf.yaml +++ b/salt/bpf/soc_bpf.yaml @@ -1,6 +1,6 @@ bpf: pcap: - description: List of BPF filters to apply to PCAP. + description: List of BPF filters to apply to Stenographer. 
multiline: True forcedType: "[]string" helpLink: bpf.html From d2fb067110facbac8bcb228e2bc99cdd6650c690 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 20 Mar 2024 15:57:32 -0400 Subject: [PATCH 236/777] FIX: Annotations for BPF and Suricata PCAP #12626 --- salt/suricata/soc_suricata.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 7decaa6d3..34c9b6269 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -21,12 +21,12 @@ suricata: helpLink: suricata.html pcap: filesize: - description: Max file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval times. + description: Maximum file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval time. advanced: True - helplink: suricata.html + helpLink: suricata.html maxsize: - description: Size in GB for total usage size of PCAP on disk. - helplink: suricata.html + description: Maximum disk usage in GB for all PCAP written by Suricata. + helpLink: suricata.html compression: description: Enable compression of Suricata PCAP. advanced: True @@ -36,7 +36,7 @@ suricata: advanced: True helpLink: suricata.html lz4-level: - description: lz4 compression level of PCAP. 0 for no compression 16 for max compression. + description: lz4 compression level of PCAP. 0 for no compression. 16 for maximum compression. advanced: True helpLink: suricata.html filename: @@ -50,13 +50,13 @@ suricata: readonly: True helpLink: suricata.html use-stream-depth: - description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth. + description: Set to "no" to ignore the stream depth and capture the entire flow. 
Set to "yes" to truncate the flow based on the stream depth. advanced: True regex: ^(yes|no)$ regexFailureMessage: You must enter either yes or no. helpLink: suricata.html conditional: - description: Set to "all" to capture PCAP for all flows. Set to "alerts" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules. + description: Set to "all" to record PCAP for all flows. Set to "alerts" to record PCAP just for alerts. Set to "tag" to record PCAP for just tagged rules. regex: ^(all|alerts|tag)$ regexFailureMessage: You must enter either all, alert or tag. helpLink: suricata.html From fff4d20e39c8eae18a296cad442910c843a49b3a Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 20 Mar 2024 16:03:45 -0400 Subject: [PATCH 237/777] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 34c9b6269..806033483 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -25,7 +25,7 @@ suricata: advanced: True helpLink: suricata.html maxsize: - description: Maximum disk usage in GB for all PCAP written by Suricata. + description: Maximum size in GB for total disk usage of all PCAP written by Suricata. helpLink: suricata.html compression: description: Enable compression of Suricata PCAP. @@ -36,7 +36,7 @@ suricata: advanced: True helpLink: suricata.html lz4-level: - description: lz4 compression level of PCAP. 0 for no compression. 16 for maximum compression. + description: lz4 compression level of PCAP. Set to 0 for no compression. Set to 16 for maximum compression. advanced: True helpLink: suricata.html filename: @@ -50,7 +50,7 @@ suricata: readonly: True helpLink: suricata.html use-stream-depth: - description: Set to "no" to ignore the stream depth and capture the entire flow. Set to "yes" to truncate the flow based on the stream depth. 
+ description: Set to "no" to ignore the stream depth and capture the entire flow. Set to "yes" to truncate the flow based on the stream depth. advanced: True regex: ^(yes|no)$ regexFailureMessage: You must enter either yes or no. From f3b921342ef5f6777ed230d87f17ccfbed309502 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 20 Mar 2024 16:06:25 -0400 Subject: [PATCH 238/777] FIX: Annotations for BPF and Suricata PCAP #12626 --- salt/suricata/soc_suricata.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 806033483..13a709c4a 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -25,10 +25,10 @@ suricata: advanced: True helpLink: suricata.html maxsize: - description: Maximum size in GB for total disk usage of all PCAP written by Suricata. + description: Maximum size in GB for total disk usage of all PCAP files written by Suricata. helpLink: suricata.html compression: - description: Enable compression of Suricata PCAP. + description: Enable compression of Suricata PCAP files. advanced: True helpLink: suricata.html lz4-checksum: @@ -36,11 +36,11 @@ suricata: advanced: True helpLink: suricata.html lz4-level: - description: lz4 compression level of PCAP. Set to 0 for no compression. Set to 16 for maximum compression. + description: lz4 compression level of PCAP files. Set to 0 for no compression. Set to 16 for maximum compression. advanced: True helpLink: suricata.html filename: - description: Filename output for Suricata PCAP. + description: Filename output for Suricata PCAP files. advanced: True readonly: True helpLink: suricata.html @@ -56,7 +56,7 @@ suricata: regexFailureMessage: You must enter either yes or no. helpLink: suricata.html conditional: - description: Set to "all" to record PCAP for all flows. Set to "alerts" to record PCAP just for alerts. Set to "tag" to record PCAP for just tagged rules. 
+ description: Set to "all" to record PCAP for all flows. Set to "alerts" to only record PCAP for Suricata alerts. Set to "tag" to only record PCAP for tagged rules. regex: ^(all|alerts|tag)$ regexFailureMessage: You must enter either all, alert or tag. helpLink: suricata.html From 778997bed46fc77b876328a6b98ebb477fe97082 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 20 Mar 2024 17:07:37 -0400 Subject: [PATCH 239/777] FEATURE: Add Events column layout for event.module system #12628 --- salt/soc/defaults.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index de372a98f..30149fdfd 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1064,6 +1064,15 @@ soc: - event.action - event.outcome - event.dataset + ':system:': + - soc_timestamp + - process.name + - process.pid + - user.effective.name + - user.name + - system.auth.sudo.command + - event.dataset + - message server: bindAddress: 0.0.0.0:9822 baseUrl: / From 4a33234c34f50d73c45b4fd23946e33948d53a5e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 21 Mar 2024 07:26:19 -0400 Subject: [PATCH 240/777] Default update to 24 hours --- salt/soc/defaults.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8defda0dd..8b78f2e91 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1080,7 +1080,7 @@ soc: elastalertengine: allowRegex: '' autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 180 + communityRulesImportFrequencySeconds: 86400 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint @@ -1132,7 +1132,7 @@ soc: strelkaengine: allowRegex: '' autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 180 + communityRulesImportFrequencySeconds: 86400 compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py denyRegex: '' reposFolder: 
/opt/sensoroni/yara/repos @@ -1143,7 +1143,7 @@ soc: suricataengine: allowRegex: '' autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 180 + communityRulesImportFrequencySeconds: 86400 communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint From f431e9ae08310963a75a20e56682da7378ff1f18 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 21 Mar 2024 10:06:25 -0400 Subject: [PATCH 241/777] Remove Strelka config --- salt/manager/init.sls | 45 ------------------------------------------- 1 file changed, 45 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index c62a41999..0ff4fa85a 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -117,51 +117,6 @@ rules_dir: - group: socore - makedirs: True -{% if STRELKAMERGED.rules.enabled %} -strelkarepos: - file.managed: - - name: /opt/so/conf/strelka/repos.txt - - source: salt://strelka/rules/repos.txt.jinja - - template: jinja - - defaults: - STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} - - makedirs: True -strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' - - identifier: strelka-yara-update - - hour: '7' - - minute: '1' -strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} - cron.present: - {% else %} - cron.absent: - {% endif %} - - user: socore - - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' - - identifier: strelka-yara-download - - hour: '7' - - minute: '1' -{% if not GLOBALS.airgap %} -update_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-update - - onchanges: - - file: yara_update_scripts -download_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-download - - onchanges: - - file: yara_update_scripts -{% endif %} -{% 
endif %} - {% else %} {{sls}}_state_not_allowed: From 1568f57096b8e3ab68ff79ec0277ce6c0f82271e Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 21 Mar 2024 10:07:27 -0400 Subject: [PATCH 242/777] Remove Strelka config --- salt/strelka/backend/config.sls | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/salt/strelka/backend/config.sls b/salt/strelka/backend/config.sls index db18a68cc..b39e06ac8 100644 --- a/salt/strelka/backend/config.sls +++ b/salt/strelka/backend/config.sls @@ -50,16 +50,6 @@ backend_taste: - user: 939 - group: 939 -{% if STRELKAMERGED.rules.enabled %} -strelkarules: - file.recurse: - - name: /opt/so/conf/strelka/rules - - source: salt://strelka/rules - - user: 939 - - group: 939 - - clean: True -{% endif %} - {% else %} {{sls}}_state_not_allowed: From 8429a364dc8483fcb3ee15681843de1b2a16bec3 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 21 Mar 2024 10:09:36 -0400 Subject: [PATCH 243/777] Remove Strelka rules watch --- salt/strelka/backend/enabled.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index fc56f4197..0df764a6e 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - - watch: - - file: strelkarules + #- watch: + #- file: strelkarules delete_so-strelka-backend_so-status.disabled: file.uncomment: From 486a633dfeeb1244df0fe77d76a723997f58d073 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 21 Mar 2024 20:07:59 +0000 Subject: [PATCH 244/777] Add pfsense Suricata config --- salt/elasticsearch/files/ingest-dynamic/common | 5 +++-- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +- salt/elasticsearch/files/ingest/suricata.common | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/files/ingest-dynamic/common b/salt/elasticsearch/files/ingest-dynamic/common index 
b2c13f983..94f5ffef7 100644 --- a/salt/elasticsearch/files/ingest-dynamic/common +++ b/salt/elasticsearch/files/ingest-dynamic/common @@ -59,8 +59,9 @@ { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }}, { "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } }, - { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } }, - { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }}, + { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } }, + { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }}, + { "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]}}, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } } {%- endraw %} {%- if HIGHLANDER %} diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 2ecbc3989..18d078244 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -68,7 +68,7 @@ "field": "_security", "ignore_missing": true } - }, + }, { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } }, { "split": { "if": "ctx.event?.dataset != null && 
ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } }, { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } }, diff --git a/salt/elasticsearch/files/ingest/suricata.common b/salt/elasticsearch/files/ingest/suricata.common index 6aec40a2b..8143882c7 100644 --- a/salt/elasticsearch/files/ingest/suricata.common +++ b/salt/elasticsearch/files/ingest/suricata.common @@ -13,7 +13,6 @@ { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } }, { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } }, { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } }, - { "lowercase": { "field": "network.transport", "ignore_failure": true } }, { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } }, { "set": { "field": "observer.name", "value": "{{agent.name}}" } }, { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } }, From 5934829e0ddf4bd20f8fc8eb82abb1929ac8cfe4 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 21 Mar 2024 20:08:33 +0000 Subject: [PATCH 245/777] Include pfsense config --- .../files/ingest/logs-pfsense.log-1.16.0 | 389 ++++++++++++++++++ .../ingest/logs-pfsense.log-1.16.0-suricata | 31 ++ 2 files changed, 420 insertions(+) create mode 100644 salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 create mode 100644 salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 new file mode 100644 index 000000000..f53abb0e3 --- /dev/null +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 @@ -0,0 +1,389 @@ +{ + "description": "Pipeline for PFsense", + "processors": [ + { + "set": { + "field": "ecs.version", + "value": "8.10.0" + } + }, + { 
+ "set": { + "field": "observer.vendor", + "value": "netgate" + } + }, + { + "set": { + "field": "observer.type", + "value": "firewall" + } + }, + { + "rename": { + "field": "message", + "target_field": "event.original" + } + }, + { + "set": { + "field": "event.kind", + "value": "event" + } + }, + { + "set": { + "field": "event.timezone", + "value": "{{_tmp.tz_offset}}", + "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'" + } + }, + { + "grok": { + "description": "Parse syslog header", + "field": "event.original", + "patterns": [ + "^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}" + ], + "pattern_definitions": { + "ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?", + "TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})", + "BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:", + "BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))", + "NAME": "[[[:alnum:]]_-]+", + "SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})", + "TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?", + "OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})", + "PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})", + "BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+", + "META": "\\[[^\\]]*\\]" + } + } + }, + { + "date": { + "if": "ctx._tmp.timestamp8601 != null", + "field": "_tmp.timestamp8601", + "target_field": "@timestamp", + "formats": [ + "ISO8601" + ] + } + }, + { + "date": { + "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null", + "field": "_tmp.timestamp", + "target_field": "@timestamp", + "formats": [ + "MMM d HH:mm:ss", + "MMM d HH:mm:ss", + "MMM dd HH:mm:ss" + ], + "timezone": "{{ 
event.timezone }}" + } + }, + { + "grok": { + "description": "Set Event Provider", + "field": "process.name", + "patterns": [ + "^%{HYPHENATED_WORDS:event.provider}" + ], + "pattern_definitions": { + "HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b" + } + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-firewall", + "if": "ctx.event.provider == 'filterlog'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-openvpn", + "if": "ctx.event.provider == 'openvpn'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-ipsec", + "if": "ctx.event.provider == 'charon'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-dhcp", + "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-unbound", + "if": "ctx.event.provider == 'unbound'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-haproxy", + "if": "ctx.event.provider == 'haproxy'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-php-fpm", + "if": "ctx.event.provider == 'php-fpm'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-squid", + "if": "ctx.event.provider == 'squid'" + } + }, + { + "pipeline": { + "name": "logs-pfsense.log-1.16.0-suricata", + "if": "ctx.event.provider == 'suricata'" + } + }, + { + "drop": { + "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)" + } + }, + { + "append": { + "field": "event.category", + "value": "network", + "if": "ctx.network != null" + } + }, + { + "convert": { + "field": "source.address", + "target_field": "source.ip", + "type": "ip", + "ignore_failure": true, + "ignore_missing": true + } + }, + { + "convert": { + "field": "destination.address", + "target_field": "destination.ip", + "type": "ip", + "ignore_failure": true, + "ignore_missing": true + } + }, + { + "set": { + 
"field": "network.type", + "value": "ipv6", + "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")" + } + }, + { + "set": { + "field": "network.type", + "value": "ipv4", + "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")" + } + }, + { + "geoip": { + "field": "source.ip", + "target_field": "source.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "field": "destination.ip", + "target_field": "destination.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "ignore_missing": true, + "database_file": "GeoLite2-ASN.mmdb", + "field": "source.ip", + "target_field": "source.as", + "properties": [ + "asn", + "organization_name" + ] + } + }, + { + "geoip": { + "database_file": "GeoLite2-ASN.mmdb", + "field": "destination.ip", + "target_field": "destination.as", + "properties": [ + "asn", + "organization_name" + ], + "ignore_missing": true + } + }, + { + "rename": { + "field": "source.as.asn", + "target_field": "source.as.number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "source.as.organization_name", + "target_field": "source.as.organization.name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "destination.as.asn", + "target_field": "destination.as.number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "destination.as.organization_name", + "target_field": "destination.as.organization.name", + "ignore_missing": true + } + }, + { + "community_id": { + "target_field": "network.community_id", + "ignore_failure": true + } + }, + { + "grok": { + "field": "observer.ingress.interface.name", + "patterns": [ + "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}" + ], + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "set": { + "field": "network.vlan.id", + "copy_from": "observer.ingress.vlan.id", + "ignore_empty_value": true + } + }, + { + "append": { + "field": "related.ip", + "value": "{{destination.ip}}", + "allow_duplicates": false, + "if": "ctx.destination?.ip != null" + } + 
}, + { + "append": { + "field": "related.ip", + "value": "{{source.ip}}", + "allow_duplicates": false, + "if": "ctx.source?.ip != null" + } + }, + { + "append": { + "field": "related.ip", + "value": "{{source.nat.ip}}", + "allow_duplicates": false, + "if": "ctx.source?.nat?.ip != null" + } + }, + { + "append": { + "field": "related.hosts", + "value": "{{destination.domain}}", + "if": "ctx.destination?.domain != null" + } + }, + { + "append": { + "field": "related.user", + "value": "{{user.name}}", + "if": "ctx.user?.name != null" + } + }, + { + "set": { + "field": "network.direction", + "value": "{{network.direction}}bound", + "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/" + } + }, + { + "remove": { + "field": [ + "_tmp" + ], + "ignore_failure": true + } + }, + { + "script": { + "lang": "painless", + "description": "This script processor iterates over the whole document to remove fields with null values.", + "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n" + } + }, + { + "remove": { + "field": "event.original", + "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))", + "ignore_failure": true, + "ignore_missing": true + } + }, + { + "pipeline": { + "name": "logs-pfsense.log@custom", + "ignore_missing_pipeline": true + } + } + ], + "on_failure": [ + { + "remove": { + "field": [ + "_tmp" + ], + "ignore_failure": true + } + }, + { + "set": { + "field": "event.kind", + "value": "pipeline_error" + } + }, + { + "append": { + "field": "error.message", + "value": "{{{ _ingest.on_failure_message }}}" + } + } + ], + "_meta": { + "managed_by": 
"fleet", + "managed": true, + "package": { + "name": "pfsense" + } + } +} diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata new file mode 100644 index 000000000..4a00f498f --- /dev/null +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata @@ -0,0 +1,31 @@ +{ + "description": "Pipeline for parsing PFsense Squid logs.", + "processors": [ + { + "pipeline": { + "name": "suricata.common" + } + } + ], + "on_failure": [ + { + "set": { + "field": "event.kind", + "value": "pipeline_error" + } + }, + { + "append": { + "field": "error.message", + "value": "{{{ _ingest.on_failure_message }}}" + } + } + ], + "_meta": { + "managed_by": "fleet", + "managed": true, + "package": { + "name": "pfsense" + } + } +} From f889a089bfd23208ed725d54af39be38cfc9b6b7 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 22 Mar 2024 09:48:27 -0400 Subject: [PATCH 246/777] disregard benign telegraf error --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index b8c68ffa1..3bf2bc778 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -122,6 +122,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then From 4e1543b6a889bbaf3ba4cac998337c05b2416435 Mon Sep 17 00:00:00 2001 From: weslambert Date: Fri, 22 Mar 2024 
09:56:21 -0400 Subject: [PATCH 247/777] Get only code --- salt/elasticsearch/files/ingest-dynamic/common | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/files/ingest-dynamic/common b/salt/elasticsearch/files/ingest-dynamic/common index b2c13f983..836b8d4af 100644 --- a/salt/elasticsearch/files/ingest-dynamic/common +++ b/salt/elasticsearch/files/ingest-dynamic/common @@ -57,10 +57,11 @@ { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, - { "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }}, + { "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } }, { "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } }, - { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } }, - { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }}, + { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } }, + { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } }, + { "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" 
], "ignore_missing": true, "ignore_failure": true } } {%- endraw %} {%- if HIGHLANDER %} From 5ca9ec4b17f0f1f7bdde1fe336d24299b711b192 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 22 Mar 2024 10:12:26 -0400 Subject: [PATCH 248/777] Enable Detections --- salt/soc/defaults.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8b78f2e91..7d8d8dd25 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1157,7 +1157,7 @@ soc: tipTimeoutMs: 6000 cacheExpirationMs: 300000 casesEnabled: true - detectionsEnabled: false + detectionsEnabled: true inactiveTools: ['toolUnused'] tools: - name: toolKibana @@ -1881,8 +1881,9 @@ soc: default: - so_detection.title - so_detection.isEnabled - - so_detection.language - so_detection.severity + - so_detection.language + - so_detection.ruleset queries: - name: "All Detections" query: "_id:*" From a78a304d4f90ac00be74b3a019458aba4286a9fc Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 22 Mar 2024 13:19:31 -0400 Subject: [PATCH 249/777] FEATURE: Add event.dataset to all Events column layouts #12641 --- salt/soc/defaults.yaml | 97 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 5905434ed..bab229aeb 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -92,6 +92,7 @@ soc: - http_request.headers.x-real-ip - identity_id - http_request.headers.user-agent + - event.dataset '::conn': - soc_timestamp - source.ip @@ -102,6 +103,7 @@ soc: - network.protocol - log.id.uid - network.community_id + - event.dataset '::dce_rpc': - soc_timestamp - source.ip @@ -112,6 +114,7 @@ soc: - dce_rpc.named_pipe - dce_rpc.operation - log.id.uid + - event.dataset '::dhcp': - soc_timestamp - client.address @@ -120,6 +123,7 @@ soc: - host.hostname - dhcp.message_types - log.id.uid + - event.dataset '::dnp3': - soc_timestamp - source.ip @@ 
-128,6 +132,7 @@ soc: - destination.port - dnp3.fc_reply - log.id.uid + - event.dataset '::dnp3_control': - soc_timestamp - source.ip @@ -137,6 +142,7 @@ soc: - dnp3.function_code - dnp3.block_type - log.id.uid + - event.dataset '::dnp3_objects': - soc_timestamp - source.ip @@ -146,6 +152,7 @@ soc: - dnp3.function_code - dnp3.object_type - log.id.uid + - event.dataset '::dns': - soc_timestamp - source.ip @@ -158,6 +165,7 @@ soc: - dns.response.code_name - log.id.uid - network.community_id + - event.dataset '::dpd': - soc_timestamp - source.ip @@ -168,6 +176,7 @@ soc: - observer.analyser - error.reason - log.id.uid + - event.dataset '::file': - soc_timestamp - source.ip @@ -178,6 +187,7 @@ soc: - file.bytes.total - log.id.fuid - log.id.uid + - event.dataset '::ftp': - soc_timestamp - source.ip @@ -190,6 +200,7 @@ soc: - ftp.reply_code - file.size - log.id.uid + - event.dataset '::http': - soc_timestamp - source.ip @@ -204,6 +215,7 @@ soc: - http.response.body.length - log.id.uid - network.community_id + - event.dataset '::intel': - soc_timestamp - source.ip @@ -214,6 +226,7 @@ soc: - intel.indicator_type - intel.seen_where - log.id.uid + - event.dataset '::irc': - soc_timestamp - source.ip @@ -226,6 +239,7 @@ soc: - irc.command.value - irc.command.info - log.id.uid + - event.dataset '::kerberos': - soc_timestamp - source.ip @@ -236,6 +250,7 @@ soc: - kerberos.service - kerberos.request_type - log.id.uid + - event.dataset '::modbus': - soc_timestamp - source.ip @@ -244,6 +259,7 @@ soc: - destination.port - modbus.function - log.id.uid + - event.dataset '::mysql': - soc_timestamp - source.ip @@ -255,6 +271,7 @@ soc: - mysql.success - mysql.response - log.id.uid + - event.dataset '::notice': - soc_timestamp - source.ip @@ -266,6 +283,7 @@ soc: - log.id.fuid - log.id.uid - network.community_id + - event.dataset '::ntlm': - soc_timestamp - source.ip @@ -278,6 +296,7 @@ soc: - ntlm.server.nb.name - ntlm.server.tree.name - log.id.uid + - event.dataset '::pe': - 
soc_timestamp - file.is_64bit @@ -286,6 +305,7 @@ soc: - file.os - file.subsystem - log.id.fuid + - event.dataset '::radius': - soc_timestamp - source.ip @@ -297,6 +317,7 @@ soc: - radius.framed_address - radius.reply_message - radius.result + - event.dataset '::rdp': - soc_timestamp - source.ip @@ -312,6 +333,7 @@ soc: - rdp.result - rdp.security_protocol - log.id.uid + - event.dataset '::rfb': - soc_timestamp - source.ip @@ -323,6 +345,7 @@ soc: - rfb.share_flag - rfb.desktop.name - log.id.uid + - event.dataset '::signatures': - soc_timestamp - source.ip @@ -336,6 +359,7 @@ soc: - signature_count - host.count - log.id.uid + - event.dataset '::sip': - soc_timestamp - source.ip @@ -353,6 +377,7 @@ soc: - sip.user_agent - sip.status_code - log.id.uid + - event.dataset '::smb_files': - soc_timestamp - source.ip @@ -366,6 +391,7 @@ soc: - file.size - file.prev_name - log.id.uid + - event.dataset '::smb_mapping': - soc_timestamp - source.ip @@ -376,6 +402,7 @@ soc: - smb.service - smb.share_type - log.id.uid + - event.dataset '::smtp': - soc_timestamp - source.ip @@ -388,6 +415,7 @@ soc: - smtp.useragent - log.id.uid - network.community_id + - event.dataset '::snmp': - soc_timestamp - source.ip @@ -397,6 +425,7 @@ soc: - snmp.community - snmp.version - log.id.uid + - event.dataset '::socks': - soc_timestamp - source.ip @@ -408,11 +437,13 @@ soc: - socks.request.port - socks.status - log.id.uid + - event.dataset '::software': - soc_timestamp - source.ip - software.name - software.type + - event.dataset '::ssh': - soc_timestamp - source.ip @@ -425,6 +456,7 @@ soc: - ssh.client - ssh.server - log.id.uid + - event.dataset '::ssl': - soc_timestamp - source.ip @@ -436,6 +468,7 @@ soc: - ssl.validation_status - ssl.version - log.id.uid + - event.dataset ':zeek:syslog': - soc_timestamp - source.ip @@ -446,6 +479,7 @@ soc: - network.protocol - syslog.severity - log.id.uid + - event.dataset '::tunnels': - soc_timestamp - source.ip @@ -455,6 +489,7 @@ soc: - tunnel_type - action 
- log.id.uid + - event.dataset '::weird': - soc_timestamp - source.ip @@ -463,6 +498,7 @@ soc: - destination.port - weird.name - log.id.uid + - event.dataset '::x509': - soc_timestamp - x509.certificate.subject @@ -470,6 +506,7 @@ soc: - x509.certificate.key.length - x509.certificate.issuer - log.id.fuid + - event.dataset '::firewall': - soc_timestamp - source.ip @@ -481,6 +518,7 @@ soc: - observer.ingress.interface.name - event.action - network.community_id + - event.dataset ':pfsense:': - soc_timestamp - source.ip @@ -492,6 +530,7 @@ soc: - observer.ingress.interface.name - event.action - network.community_id + - event.dataset ':osquery:': - soc_timestamp - source.ip @@ -499,9 +538,9 @@ soc: - destination.ip - destination.port - source.hostname - - event.dataset - process.executable - user.name + - event.dataset ':strelka:file': - soc_timestamp - file.name @@ -510,6 +549,7 @@ soc: - file.source - file.mime_type - log.id.fuid + - event.dataset ':suricata:': - soc_timestamp - source.ip @@ -521,9 +561,11 @@ soc: - event.severity_label - log.id.uid - network.community_id + - event.dataset ':windows_eventlog:': - soc_timestamp - user.name + - event.dataset ':elasticsearch:': - soc_timestamp - agent.name @@ -545,6 +587,7 @@ soc: - real_message - syslog.priority - syslog.application + - event.dataset ':aws:': - soc_timestamp - aws.cloudtrail.event_category @@ -556,6 +599,7 @@ soc: - user.name - source.ip - source.geo.region_iso_code + - event.dataset ':squid:': - soc_timestamp - url.original @@ -563,6 +607,7 @@ soc: - destination.geo.country_iso_code - user.name - source.ip + - event.dataset '::sysmon_operational': - soc_timestamp - event.action @@ -570,6 +615,7 @@ soc: - user.name - process.executable - process.pid + - event.dataset '::network_connection': - soc_timestamp - source.ip @@ -577,44 +623,50 @@ soc: - destination.ip - destination.port - source.hostname - - event.dataset - process.executable - user.name + - event.dataset '::process_terminated': - 
soc_timestamp - process.executable - process.pid - winlog.computer_name + - event.dataset '::file_create': - soc_timestamp - file.target - process.executable - process.pid - winlog.computer_name + - event.dataset '::registry_value_set': - soc_timestamp - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name + - event.dataset '::process_creation': - soc_timestamp - process.command_line - process.pid - process.parent.executable - process.working_directory + - event.dataset '::registry_create_delete': - soc_timestamp - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name + - event.dataset '::dns_query': - soc_timestamp - dns.query.name - dns.answers.name - process.executable - winlog.computer_name + - event.dataset '::file_create_stream_hash': - soc_timestamp - file.target @@ -623,6 +675,7 @@ soc: - process.executable - process.pid - winlog.computer_name + - event.dataset '::bacnet': - soc_timestamp - source.ip @@ -632,6 +685,7 @@ soc: - bacnet.bclv.function - bacnet.result.code - log.id.uid + - event.dataset '::bacnet_discovery': - soc_timestamp - source.ip @@ -641,6 +695,7 @@ soc: - bacnet.vendor - bacnet.pdu.service - log.id.uid + - event.dataset '::bacnet_property': - soc_timestamp - source.ip @@ -650,6 +705,7 @@ soc: - bacnet.property - bacnet.pdu.service - log.id.uid + - event.dataset '::bsap_ip_header': - soc_timestamp - source.ip @@ -659,12 +715,14 @@ soc: - bsap.message.type - bsap.number.messages - log.id.uid + - event.dataset '::bsap_ip_rdb': - soc_timestamp - bsap.application.function - bsap.application.sub.function - bsap.vector.variables - log.id.uid + - event.dataset '::bsap_serial_header': - soc_timestamp - source.ip @@ -675,11 +733,13 @@ soc: - bsap.destination.function - bsap.message.type - log.id.uid + - event.dataset '::bsap_serial_rdb': - soc_timestamp - bsap.rdb.function - bsap.vector.variables - log.id.uid + - event.dataset '::cip': - soc_timestamp - source.ip @@ -699,6 
+759,7 @@ soc: - cip.device.type.name - cip.vendor.name - log.id.uid + - event.dataset '::cip_io': - soc_timestamp - source.ip @@ -708,6 +769,7 @@ soc: - cip.connection.id - cip.io.data - log.id.uid + - event.dataset '::cotp': - soc_timestamp - source.ip @@ -716,6 +778,7 @@ soc: - destination.port - cotp.pdu.name - log.id.uid + - event.dataset '::ecat_arp_info': - soc_timestamp - source.ip @@ -723,6 +786,7 @@ soc: - source.mac - destination.mac - ecat.arp.type + - event.dataset '::ecat_aoe_info': - soc_timestamp - source.mac @@ -730,6 +794,7 @@ soc: - destination.mac - destination.port - ecat.command + - event.dataset '::ecat_coe_info': - soc_timestamp - ecat.message.number @@ -737,6 +802,7 @@ soc: - ecat.request.response.type - ecat.index - ecat.sub.index + - event.dataset '::ecat_dev_info': - soc_timestamp - ecat.device.type @@ -744,17 +810,20 @@ soc: - ecat.ram.size - ecat.revision - ecat.slave.address + - event.dataset '::ecat_log_address': - soc_timestamp - source.mac - destination.mac - ecat.command + - event.dataset '::ecat_registers': - soc_timestamp - source.mac - destination.mac - ecat.command - ecat.register.type + - event.dataset '::enip': - soc_timestamp - source.ip @@ -773,6 +842,7 @@ soc: - destination.port - modbus.function - log.id.uid + - event.dataset '::opcua_binary': - soc_timestamp - source.ip @@ -782,6 +852,7 @@ soc: - opcua.identifier_string - opcua.message_type - log.id.uid + - event.dataset '::opcua_binary_activate_session': - soc_timestamp - source.ip @@ -792,6 +863,7 @@ soc: - opcua.identifier_string - opcua.user_name - log.id.uid + - event.dataset '::opcua_binary_activate_session_diagnostic_info': - soc_timestamp - source.ip @@ -801,6 +873,7 @@ soc: - opcua.activate_session_diag_info_link_id - opcua.diag_info_link_id - log.id.uid + - event.dataset '::opcua_binary_activate_session_locale_id': - soc_timestamp - source.ip @@ -810,6 +883,7 @@ soc: - opcua.local_id - opcua.locale_link_id - log.id.uid + - event.dataset 
'::opcua_binary_browse': - soc_timestamp - source.ip @@ -819,6 +893,7 @@ soc: - opcua.link_id - opcua.service_type - log.id.uid + - event.dataset '::opcua_binary_browse_description': - soc_timestamp - source.ip @@ -826,6 +901,7 @@ soc: - destination.ip - destination.port - log.id.uid + - event.dataset '::opcua_binary_browse_response_references': - soc_timestamp - source.ip @@ -835,6 +911,7 @@ soc: - opcua.node_class - opcua.display_name_text - log.id.uid + - event.dataset '::opcua_binary_browse_result': - soc_timestamp - source.ip @@ -843,6 +920,7 @@ soc: - destination.port - opcua.response_link_id - log.id.uid + - event.dataset '::opcua_binary_create_session': - soc_timestamp - source.ip @@ -851,6 +929,7 @@ soc: - destination.port - opcua.link_id - log.id.uid + - event.dataset '::opcua_binary_create_session_endpoints': - soc_timestamp - source.ip @@ -860,6 +939,7 @@ soc: - opcua.endpoint_link_id - opcua.endpoint_url - log.id.uid + - event.dataset '::opcua_binary_create_session_user_token': - soc_timestamp - source.ip @@ -868,6 +948,7 @@ soc: - destination.port - opcua.user_token_link_id - log.id.uid + - event.dataset '::opcua_binary_create_subscription': - soc_timestamp - source.ip @@ -876,6 +957,7 @@ soc: - destination.port - opcua.link_id - log.id.uid + - event.dataset '::opcua_binary_get_endpoints': - soc_timestamp - source.ip @@ -885,6 +967,7 @@ soc: - opcua.endpoint_url - opcua.link_id - log.id.uid + - event.dataset '::opcua_binary_get_endpoints_description': - soc_timestamp - source.ip @@ -894,6 +977,7 @@ soc: - opcua.endpoint_description_link_id - opcua.endpoint_uri - log.id.uid + - event.dataset '::opcua_binary_get_endpoints_user_token': - soc_timestamp - source.ip @@ -903,6 +987,7 @@ soc: - opcua.user_token_link_id - opcua.user_token_type - log.id.uid + - event.dataset '::opcua_binary_read': - soc_timestamp - source.ip @@ -912,6 +997,7 @@ soc: - opcua.link_id - opcua.read_results_link_id - log.id.uid + - event.dataset '::opcua_binary_status_code_detail': 
- soc_timestamp - source.ip @@ -921,6 +1007,7 @@ soc: - opcua.info_type_string - opcua.source_string - log.id.uid + - event.dataset '::profinet': - soc_timestamp - source.ip @@ -930,6 +1017,7 @@ soc: - profinet.index - profinet.operation_type - log.id.uid + - event.dataset '::profinet_dce_rpc': - soc_timestamp - source.ip @@ -938,6 +1026,7 @@ soc: - destination.port - profinet.operation - log.id.uid + - event.dataset '::s7comm': - soc_timestamp - source.ip @@ -947,6 +1036,7 @@ soc: - s7.ros.control.name - s7.function.name - log.id.uid + - event.dataset '::s7comm_plus': - soc_timestamp - source.ip @@ -956,6 +1046,7 @@ soc: - s7.opcode.name - s7.version - log.id.uid + - event.dataset '::s7comm_read_szl': - soc_timestamp - source.ip @@ -965,6 +1056,7 @@ soc: - s7.szl_id_name - s7.return_code_name - log.id.uid + - event.dataset '::s7comm_upload_download': - soc_timestamp - source.ip @@ -974,6 +1066,7 @@ soc: - s7.ros.control.name - s7.function_code - log.id.uid + - event.dataset '::tds': - soc_timestamp - source.ip From 9c6f3f480814f50bea09507a0b3de32645e29820 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 22 Mar 2024 13:41:44 -0400 Subject: [PATCH 250/777] FIX: Specify that static IP address is recommended #12643 --- setup/so-whiptail | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index ff8c9fe8d..904654c9b 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -288,9 +288,9 @@ whiptail_dhcp_or_static() { [ -n "$TESTING" ] && return address_type=$(whiptail --title "$whiptail_title" --menu \ - "Choose how to set up your management interface:" 20 78 4 \ - "STATIC" "Set a static IPv4 address" \ - "DHCP" "Use DHCP to configure the Management Interface" 3>&1 1>&2 2>&3 ) + "Choose how to set up your management interface. We recommend using a static IP address." 
20 78 4 \ + "STATIC" "Set a static IPv4 address (recommended)" \ + "DHCP" "Use DHCP to configure the management interface" 3>&1 1>&2 2>&3 ) local exitstatus=$? whiptail_check_exitstatus $exitstatus From bb0da2a5c5b21e23fe54f18dc78d23bb77f9e963 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 22 Mar 2024 14:34:14 -0400 Subject: [PATCH 251/777] add additional suricata af-packet config items --- salt/suricata/defaults.yaml | 7 +++++++ salt/suricata/map.jinja | 7 +++++++ salt/suricata/soc_suricata.yaml | 35 +++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 0252d3a81..8680fbda2 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -33,6 +33,13 @@ suricata: threads: 1 tpacket-v3: "yes" ring-size: 5000 + mmap-locked: "yes" + block-size: 32768 + block-timeout: 10 + use-emergency-flush: "yes" + buffer-size: 32768 + disable-promisc: "no" + checksum-checks: kernel vars: address-groups: HOME_NET: diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 7f7b04aef..9d5581030 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -37,6 +37,13 @@ threads: {{ SURICATAMERGED.config['af-packet'].threads }} tpacket-v3: {{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }} ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }} + mmap-locked: {{ SURICATAMERGED.config['af-packet']['mmap-locked'] }} + block-size: {{ SURICATAMERGED.config['af-packet']['block-size'] }} + block-timeout: {{ SURICATAMERGED.config['af-packet']['block-timeout'] }} + use-emergency-flush: {{ SURICATAMERGED.config['af-packet']['use-emergency-flush'] }} + buffer-size: {{ SURICATAMERGED.config['af-packet']['buffer-size'] }} + disable-promisc: {{ SURICATAMERGED.config['af-packet']['disable-promisc'] }} + checksum-checks: {{ SURICATAMERGED.config['af-packet']['checksum-checks'] }} {% endload %} {% do SURICATAMERGED.config.pop('af-packet') %} {% do 
SURICATAMERGED.config.update({'af-packet': afpacket}) %} diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 13a709c4a..9843d8c97 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -94,6 +94,41 @@ suricata: description: Buffer size for packets per thread. forcedType: int helpLink: suricata.html + mmap-locked: + description: Prevent swapping by locking the memory map. + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html + block-size: + description: This must be configured to a sufficiently high value to accommodate a significant number of packets, considering byte size and MTU constraints. Ensure it aligns with a power of 2 and is a multiple of the page size. + advanced: True + forcedType: int + helpLink: suricata.html + block-timeout: + description: If a block remains unfilled after the specified block-timeout milliseconds, it is passed to userspace. + advanced: True + forcedType: int + helpLink: suricata.html + use-emergency-flush: + description: In high-traffic environments, enabling this option to 'yes' aids in recovering from packet drop occurrences. However, it may lead to some packets, possibly at max ring flush, not being inspected. + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html + buffer-size: + description: Increasing the value of the receive buffer may improve performance. + advanced: True + forcedType: int + helpLink: suricata.html + disable-promisc: + description: Promiscuous mode can be disabled by setting this to "yes". + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html + checksum-checks: + description: "Opt for the checksum verification mode suitable for the interface. During capture, it's possible that some packets may exhibit invalid checksums due to the network card handling the checksum computation. You have several options: 'kernel': Relies on indications sent by the kernel for each packet (default). 'yes': Enforces checksum validation. 
'no': Disables checksum validation. 'auto': Suricata employs a statistical approach to detect checksum offloading." + advanced: True + regex: ^(kernel|yes|no|auto)$ + helpLink: suricata.html threading: set-cpu-affinity: description: Bind(yes) or unbind(no) management and worker threads to a core or range of cores. From 3d04d3703036cf883d6a8839e8a91e98316a5c41 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 22 Mar 2024 10:31:09 -0600 Subject: [PATCH 252/777] Update ElastAlert Config with Default Repos --- salt/soc/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index bab229aeb..a7ee65f57 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1186,6 +1186,9 @@ soc: denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint + rulesRepos: + - repo: https://github.com/Security-Onion-Solutions/securityonion-resources + license: DRL sigmaRulePackages: - core - emerging_threats_addon From 237946e916ee6bfc9f9967f9244ea0651c8dd755 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 22 Mar 2024 13:51:59 -0600 Subject: [PATCH 253/777] Specify Folder in Rule Repo --- salt/soc/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index a7ee65f57..4b3d23afe 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1189,6 +1189,7 @@ soc: rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-resources license: DRL + folder: sigma/stable sigmaRulePackages: - core - emerging_threats_addon From 81f3d69eb9b759bd4b98048d85184d9b3c31ddfb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 22 Mar 2024 15:55:59 -0400 Subject: [PATCH 254/777] remove mmap-locked. 
--- salt/suricata/defaults.yaml | 1 - salt/suricata/map.jinja | 11 +++++------ salt/suricata/soc_suricata.yaml | 5 ----- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 8680fbda2..914c045b1 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -33,7 +33,6 @@ suricata: threads: 1 tpacket-v3: "yes" ring-size: 5000 - mmap-locked: "yes" block-size: 32768 block-timeout: 10 use-emergency-flush: "yes" diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 9d5581030..55c9aab4c 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -32,17 +32,16 @@ - interface: {{ GLOBALS.sensor.interface }} cluster-id: {{ SURICATAMERGED.config['af-packet']['cluster-id'] }} cluster-type: {{ SURICATAMERGED.config['af-packet']['cluster-type'] }} - defrag: {{ SURICATAMERGED.config['af-packet'].defrag }} - use-mmap: {{ SURICATAMERGED.config['af-packet']['use-mmap'] }} + defrag: "{{ SURICATAMERGED.config['af-packet'].defrag }}" + use-mmap: "{{ SURICATAMERGED.config['af-packet']['use-mmap'] }}" threads: {{ SURICATAMERGED.config['af-packet'].threads }} - tpacket-v3: {{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }} + tpacket-v3: "{{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}" ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }} - mmap-locked: {{ SURICATAMERGED.config['af-packet']['mmap-locked'] }} block-size: {{ SURICATAMERGED.config['af-packet']['block-size'] }} block-timeout: {{ SURICATAMERGED.config['af-packet']['block-timeout'] }} - use-emergency-flush: {{ SURICATAMERGED.config['af-packet']['use-emergency-flush'] }} + use-emergency-flush: "{{ SURICATAMERGED.config['af-packet']['use-emergency-flush'] }}" buffer-size: {{ SURICATAMERGED.config['af-packet']['buffer-size'] }} - disable-promisc: {{ SURICATAMERGED.config['af-packet']['disable-promisc'] }} + disable-promisc: "{{ SURICATAMERGED.config['af-packet']['disable-promisc'] }}" 
checksum-checks: {{ SURICATAMERGED.config['af-packet']['checksum-checks'] }} {% endload %} {% do SURICATAMERGED.config.pop('af-packet') %} diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 9843d8c97..c9ba80f01 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -94,11 +94,6 @@ suricata: description: Buffer size for packets per thread. forcedType: int helpLink: suricata.html - mmap-locked: - description: Prevent swapping by locking the memory map. - advanced: True - regex: ^(yes|no)$ - helpLink: suricata.html block-size: description: This must be configured to a sufficiently high value to accommodate a significant number of packets, considering byte size and MTU constraints. Ensure it aligns with a power of 2 and is a multiple of the page size. advanced: True From 029d8a0e8fd50c3ffa75a04572d15e6c10c60202 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 25 Mar 2024 09:30:41 -0400 Subject: [PATCH 255/777] handle yes/no on checksum-checks --- salt/suricata/map.jinja | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 55c9aab4c..2a3adf5f1 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -42,7 +42,11 @@ use-emergency-flush: "{{ SURICATAMERGED.config['af-packet']['use-emergency-flush'] }}" buffer-size: {{ SURICATAMERGED.config['af-packet']['buffer-size'] }} disable-promisc: "{{ SURICATAMERGED.config['af-packet']['disable-promisc'] }}" +{% if SURICATAMERGED.config['af-packet']['checksum-checks'] in ['yes', 'no'] %} + checksum-checks: "{{ SURICATAMERGED.config['af-packet']['checksum-checks'] }}" +{% else %} checksum-checks: {{ SURICATAMERGED.config['af-packet']['checksum-checks'] }} +{% endif %} {% endload %} {% do SURICATAMERGED.config.pop('af-packet') %} {% do SURICATAMERGED.config.update({'af-packet': afpacket}) %} From 5e21da443f02bf83a024962977bcf310abbfbfb9 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 25 Mar 2024 
13:58:32 +0000 Subject: [PATCH 256/777] Minor verbiage updates --- salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 | 2 +- .../elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 index f53abb0e3..af31e1518 100644 --- a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 @@ -1,5 +1,5 @@ { - "description": "Pipeline for PFsense", + "description": "Pipeline for pfSense", "processors": [ { "set": { diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata index 4a00f498f..f3a14af44 100644 --- a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0-suricata @@ -1,5 +1,5 @@ { - "description": "Pipeline for parsing PFsense Squid logs.", + "description": "Pipeline for parsing pfSense Suricata logs.", "processors": [ { "pipeline": { From 49fa800b2b44a4d6d515f047cd8a1185cf975b1a Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 25 Mar 2024 14:45:50 -0400 Subject: [PATCH 257/777] Add bindings for sigma repos --- salt/soc/config.sls | 9 ++++++++- salt/soc/defaults.yaml | 3 ++- salt/soc/enabled.sls | 1 + 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index e4dad8df2..ad0ab1c8d 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -9,7 +9,14 @@ include: - manager.sync_es_users -socdirtest: +sigmarepodir: + file.directory: + - name: /opt/so/conf/sigma/repos + - user: 939 + - group: 939 + - makedirs: True + +socdirelastaertrules: file.directory: - name: /opt/so/rules/elastalert/rules - user: 939 diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8bb180567..5e7b423cd 100644 --- 
a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1185,10 +1185,11 @@ soc: communityRulesImportFrequencySeconds: 86400 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert + reposFolder: /opt/sensoroni/sigma/repos rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-resources - license: DRL + license: Elastic-2.0 folder: sigma/stable sigmaRulePackages: - core diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 93ca07ac8..bbe36e5b7 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -24,6 +24,7 @@ so-soc: - binds: - /nsm/rules:/nsm/rules:rw - /opt/so/conf/strelka:/opt/sensoroni/yara:rw + - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw From d7ecad4333177ad6ebc78bd5f305f780581fed6f Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 25 Mar 2024 19:42:31 -0400 Subject: [PATCH 258/777] Initial cut to remove Playbook and deps --- pillar/top.sls | 10 - salt/allowed_states.map.jinja | 12 - salt/common/tools/sbin/so-image-common | 3 - salt/common/tools/sbin/so-ip-update | 4 - salt/common/tools/sbin/so-log-check | 5 - salt/docker/defaults.yaml | 21 - salt/docker/soc_docker.yaml | 3 - salt/firewall/containers.map.jinja | 4 - salt/firewall/defaults.yaml | 16 - salt/firewall/soc_firewall.yaml | 6 - salt/manager/soc_manager.yaml | 4 - salt/manager/tools/sbin/so-minion | 32 - salt/mysql/config.sls | 89 - salt/mysql/defaults.yaml | 2 - salt/mysql/disabled.sls | 27 - salt/mysql/enabled.sls | 84 - salt/mysql/etc/my.cnf | 32 - salt/mysql/etc/mypass | 1 - salt/mysql/init.sls | 14 - salt/mysql/map.jinja | 7 - salt/mysql/soc_mysql.yaml | 4 - salt/mysql/sostatus.sls | 21 - salt/mysql/tools/sbin/so-mysql-restart | 12 - salt/mysql/tools/sbin/so-mysql-start | 12 - 
salt/mysql/tools/sbin/so-mysql-stop | 12 - salt/nginx/etc/nginx.conf | 27 - salt/playbook/automation_user_create.sls | 19 - salt/playbook/config.sls | 120 -- salt/playbook/db_init.sls | 14 - salt/playbook/defaults.yaml | 2 - salt/playbook/disabled.sls | 37 - salt/playbook/enabled.sls | 93 - salt/playbook/files/automation_user_create.sh | 49 - salt/playbook/files/playbook_db_init.sh | 17 - salt/playbook/files/playbook_db_init.sql | 1788 ----------------- salt/playbook/init.sls | 14 - salt/playbook/map.jinja | 2 - salt/playbook/soc_playbook.yaml | 4 - salt/playbook/sostatus.sls | 21 - salt/playbook/tools/sbin/so-playbook-import | 14 - salt/playbook/tools/sbin/so-playbook-reset | 22 - salt/playbook/tools/sbin/so-playbook-restart | 12 - .../tools/sbin/so-playbook-ruleupdate | 12 - .../tools/sbin/so-playbook-sigma-refresh | 29 - salt/playbook/tools/sbin/so-playbook-start | 12 - salt/playbook/tools/sbin/so-playbook-stop | 12 - salt/playbook/tools/sbin/so-playbook-sync | 16 - salt/soc/defaults.yaml | 5 - salt/soctopus/config.sls | 88 - salt/soctopus/defaults.yaml | 2 - salt/soctopus/disabled.sls | 27 - salt/soctopus/enabled.sls | 72 - salt/soctopus/files/SOCtopus.conf | 77 - .../files/templates/es-generic.template | 5 - .../soctopus/files/templates/generic.template | 22 - .../soctopus/files/templates/osquery.template | 13 - salt/soctopus/init.sls | 13 - salt/soctopus/map.jinja | 7 - salt/soctopus/soc_soctopus.yaml | 10 - salt/soctopus/sostatus.sls | 21 - salt/soctopus/tools/sbin/so-soctopus-restart | 12 - salt/soctopus/tools/sbin/so-soctopus-start | 12 - salt/soctopus/tools/sbin/so-soctopus-stop | 12 - salt/top.sls | 12 - setup/so-functions | 34 +- setup/so-setup | 4 - setup/so-variables | 6 - 67 files changed, 1 insertion(+), 3226 deletions(-) delete mode 100644 salt/mysql/config.sls delete mode 100644 salt/mysql/defaults.yaml delete mode 100644 salt/mysql/disabled.sls delete mode 100644 salt/mysql/enabled.sls delete mode 100644 salt/mysql/etc/my.cnf delete mode 100644 
salt/mysql/etc/mypass delete mode 100644 salt/mysql/init.sls delete mode 100644 salt/mysql/map.jinja delete mode 100644 salt/mysql/soc_mysql.yaml delete mode 100644 salt/mysql/sostatus.sls delete mode 100755 salt/mysql/tools/sbin/so-mysql-restart delete mode 100755 salt/mysql/tools/sbin/so-mysql-start delete mode 100755 salt/mysql/tools/sbin/so-mysql-stop delete mode 100644 salt/playbook/automation_user_create.sls delete mode 100644 salt/playbook/config.sls delete mode 100644 salt/playbook/db_init.sls delete mode 100644 salt/playbook/defaults.yaml delete mode 100644 salt/playbook/disabled.sls delete mode 100644 salt/playbook/enabled.sls delete mode 100644 salt/playbook/files/automation_user_create.sh delete mode 100644 salt/playbook/files/playbook_db_init.sh delete mode 100644 salt/playbook/files/playbook_db_init.sql delete mode 100644 salt/playbook/init.sls delete mode 100644 salt/playbook/map.jinja delete mode 100644 salt/playbook/soc_playbook.yaml delete mode 100644 salt/playbook/sostatus.sls delete mode 100755 salt/playbook/tools/sbin/so-playbook-import delete mode 100755 salt/playbook/tools/sbin/so-playbook-reset delete mode 100755 salt/playbook/tools/sbin/so-playbook-restart delete mode 100755 salt/playbook/tools/sbin/so-playbook-ruleupdate delete mode 100755 salt/playbook/tools/sbin/so-playbook-sigma-refresh delete mode 100755 salt/playbook/tools/sbin/so-playbook-start delete mode 100755 salt/playbook/tools/sbin/so-playbook-stop delete mode 100755 salt/playbook/tools/sbin/so-playbook-sync delete mode 100644 salt/soctopus/config.sls delete mode 100644 salt/soctopus/defaults.yaml delete mode 100644 salt/soctopus/disabled.sls delete mode 100644 salt/soctopus/enabled.sls delete mode 100644 salt/soctopus/files/SOCtopus.conf delete mode 100644 salt/soctopus/files/templates/es-generic.template delete mode 100644 salt/soctopus/files/templates/generic.template delete mode 100644 salt/soctopus/files/templates/osquery.template delete mode 100644 salt/soctopus/init.sls 
delete mode 100644 salt/soctopus/map.jinja delete mode 100644 salt/soctopus/soc_soctopus.yaml delete mode 100644 salt/soctopus/sostatus.sls delete mode 100755 salt/soctopus/tools/sbin/so-soctopus-restart delete mode 100755 salt/soctopus/tools/sbin/so-soctopus-start delete mode 100755 salt/soctopus/tools/sbin/so-soctopus-stop diff --git a/pillar/top.sls b/pillar/top.sls index 9af62aa0b..b6d6c2e73 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -43,8 +43,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - kratos.soc_kratos @@ -61,8 +59,6 @@ base: - elastalert.adv_elastalert - backup.soc_backup - backup.adv_backup - - soctopus.soc_soctopus - - soctopus.adv_soctopus - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - stig.soc_stig @@ -108,8 +104,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - strelka.soc_strelka @@ -165,8 +159,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - strelka.soc_strelka @@ -262,8 +254,6 @@ base: - soc.soc_soc - soc.adv_soc - soc.license - - soctopus.soc_soctopus - - soctopus.adv_soctopus - kibana.soc_kibana - kibana.adv_kibana - backup.soc_backup diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 3ead8b26e..7fbf4ff14 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -34,7 +34,6 @@ 'suricata', 'utility', 'schedule', - 'soctopus', 'tcpreplay', 'docker_clean' ], @@ -101,7 +100,6 @@ 'suricata.manager', 'utility', 'schedule', - 'soctopus', 'docker_clean', 'stig' ], @@ -123,7 +121,6 @@ 'suricata.manager', 'utility', 'schedule', - 'soctopus', 'docker_clean', 'stig' ], @@ -157,7 +154,6 @@ 'healthcheck', 'utility', 'schedule', - 'soctopus', 'tcpreplay', 'docker_clean', 'stig' @@ -200,10 +196,6 @@ 
], }, grain='role') %} - {% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %} - {% do allowed_states.append('mysql') %} - {% endif %} - {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %} {% do allowed_states.append('zeek') %} {%- endif %} @@ -229,10 +221,6 @@ {% do allowed_states.append('elastalert') %} {% endif %} - {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %} - {% do allowed_states.append('playbook') %} - {% endif %} - {% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %} {% do allowed_states.append('logstash') %} {% endif %} diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 7900b3c52..752ec20e0 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -53,13 +53,10 @@ container_list() { "so-kibana" "so-kratos" "so-logstash" - "so-mysql" "so-nginx" "so-pcaptools" - "so-playbook" "so-redis" "so-soc" - "so-soctopus" "so-steno" "so-strelka-backend" "so-strelka-filestream" diff --git a/salt/common/tools/sbin/so-ip-update b/salt/common/tools/sbin/so-ip-update index 7278afb94..6a330f644 100755 --- a/salt/common/tools/sbin/so-ip-update +++ b/salt/common/tools/sbin/so-ip-update @@ -49,10 +49,6 @@ if [ "$CONTINUE" == "y" ]; then sed -i "s|$OLD_IP|$NEW_IP|g" $file done - echo "Granting MySQL root user permissions on $NEW_IP" - docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null - echo "Removing MySQL root user from $OLD_IP" - docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null echo "Updating Kibana dashboards" salt-call state.apply kibana.so_savedobjects_defaults -l 
info queue=True diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 3bf2bc778..b5f9d77cb 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -155,15 +155,11 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index @@ -211,7 +207,6 @@ RESULT=0 CONTAINER_IDS=$(docker ps -q) exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary exclude_container so-idstools # ignore due to known issues and noisy logging 
-exclude_container so-playbook # ignore due to several playbook known issues for container_id in $CONTAINER_IDS; do container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names") diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 4bc212fbe..2ceaecaa7 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -67,13 +67,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-mysql': - final_octet: 30 - port_bindings: - - 0.0.0.0:3306:3306 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-nginx': final_octet: 31 port_bindings: @@ -91,13 +84,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-playbook': - final_octet: 32 - port_bindings: - - 0.0.0.0:3000:3000 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-redis': final_octet: 33 port_bindings: @@ -118,13 +104,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-soctopus': - final_octet: 35 - port_bindings: - - 0.0.0.0:7000:7000 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-strelka-backend': final_octet: 36 custom_bind_mounts: [] diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index 6e0efeb20..da078941a 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -46,14 +46,11 @@ docker: so-kibana: *dockerOptions so-kratos: *dockerOptions so-logstash: *dockerOptions - so-mysql: *dockerOptions so-nginx: *dockerOptions so-nginx-fleet-node: *dockerOptions - so-playbook: *dockerOptions so-redis: *dockerOptions so-sensoroni: *dockerOptions so-soc: *dockerOptions - so-soctopus: *dockerOptions so-strelka-backend: *dockerOptions so-strelka-filestream: *dockerOptions so-strelka-frontend: *dockerOptions diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index b3ead0f4c..aa9a4d0a5 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -9,11 +9,9 @@ 
'so-influxdb', 'so-kibana', 'so-kratos', - 'so-mysql', 'so-nginx', 'so-redis', 'so-soc', - 'so-soctopus', 'so-strelka-coordinator', 'so-strelka-gatekeeper', 'so-strelka-frontend', @@ -32,11 +30,9 @@ 'so-kibana', 'so-kratos', 'so-logstash', - 'so-mysql', 'so-nginx', 'so-redis', 'so-soc', - 'so-soctopus', 'so-strelka-coordinator', 'so-strelka-gatekeeper', 'so-strelka-frontend', diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 75a70828e..b10505956 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -98,19 +98,11 @@ firewall: tcp: - 7788 udp: [] - mysql: - tcp: - - 3306 - udp: [] nginx: tcp: - 80 - 443 udp: [] - playbook: - tcp: - - 3000 - udp: [] redis: tcp: - 6379 @@ -178,8 +170,6 @@ firewall: hostgroups: eval: portgroups: - - playbook - - mysql - kibana - redis - influxdb @@ -363,8 +353,6 @@ firewall: hostgroups: manager: portgroups: - - playbook - - mysql - kibana - redis - influxdb @@ -559,8 +547,6 @@ firewall: hostgroups: managersearch: portgroups: - - playbook - - mysql - kibana - redis - influxdb @@ -756,8 +742,6 @@ firewall: - all standalone: portgroups: - - playbook - - mysql - kibana - redis - influxdb diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 7a2e3b035..522684e07 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -121,15 +121,9 @@ firewall: localrules: tcp: *tcpsettings udp: *udpsettings - mysql: - tcp: *tcpsettings - udp: *udpsettings nginx: tcp: *tcpsettings udp: *udpsettings - playbook: - tcp: *tcpsettings - udp: *udpsettings redis: tcp: *tcpsettings udp: *udpsettings diff --git a/salt/manager/soc_manager.yaml b/salt/manager/soc_manager.yaml index 01d63a609..f6461a0c7 100644 --- a/salt/manager/soc_manager.yaml +++ b/salt/manager/soc_manager.yaml @@ -20,10 +20,6 @@ manager: description: String of hosts to ignore the proxy settings for. 
global: True helpLink: proxy.html - playbook: - description: Enable playbook 1=enabled 0=disabled. - global: True - helpLink: playbook.html proxy: description: Proxy server to use for updates. global: True diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index cb4e40ade..34e069ece 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -286,12 +286,6 @@ function add_sensor_to_minion() { echo " " >> $PILLARFILE } -function add_playbook_to_minion() { - printf '%s\n'\ - "playbook:"\ - " enabled: True"\ - " " >> $PILLARFILE -} function add_elastalert_to_minion() { printf '%s\n'\ @@ -353,13 +347,6 @@ function add_nginx_to_minion() { " " >> $PILLARFILE } -function add_soctopus_to_minion() { - printf '%s\n'\ - "soctopus:"\ - " enabled: True"\ - " " >> $PILLARFILE -} - function add_soc_to_minion() { printf '%s\n'\ "soc:"\ @@ -374,13 +361,6 @@ function add_registry_to_minion() { " " >> $PILLARFILE } -function add_mysql_to_minion() { - printf '%s\n'\ - "mysql:"\ - " enabled: True"\ - " " >> $PILLARFILE -} - function add_kratos_to_minion() { printf '%s\n'\ "kratos:"\ @@ -456,16 +436,13 @@ function createEVAL() { add_elasticsearch_to_minion add_sensor_to_minion add_strelka_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion @@ -478,17 +455,14 @@ function createSTANDALONE() { add_logstash_to_minion add_sensor_to_minion add_strelka_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_redis_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion 
add_elastic_fleet_package_registry_to_minion @@ -497,17 +471,14 @@ function createSTANDALONE() { function createMANAGER() { add_elasticsearch_to_minion add_logstash_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_redis_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion @@ -516,17 +487,14 @@ function createMANAGER() { function createMANAGERSEARCH() { add_elasticsearch_to_minion add_logstash_to_minion - add_playbook_to_minion add_elastalert_to_minion add_kibana_to_minion add_redis_to_minion add_telegraf_to_minion add_influxdb_to_minion add_nginx_to_minion - add_soctopus_to_minion add_soc_to_minion add_registry_to_minion - add_mysql_to_minion add_kratos_to_minion add_idstools_to_minion add_elastic_fleet_package_registry_to_minion diff --git a/salt/mysql/config.sls b/salt/mysql/config.sls deleted file mode 100644 index 274f25d76..000000000 --- a/salt/mysql/config.sls +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} - -# MySQL Setup -mysqlpkgs: - pkg.removed: - - skip_suggestions: False - - pkgs: - {% if grains['os_family'] != 'RedHat' %} - - python3-mysqldb - {% else %} - - python3-mysqlclient - {% endif %} - -mysqletcdir: - file.directory: - - name: /opt/so/conf/mysql/etc - - user: 939 - - group: 939 - - makedirs: True - -mysqlpiddir: - file.directory: - - name: /opt/so/conf/mysql/pid - - user: 939 - - group: 939 - - makedirs: True - -mysqlcnf: - file.managed: - - name: /opt/so/conf/mysql/etc/my.cnf - - source: salt://mysql/etc/my.cnf - - user: 939 - - group: 939 - -mysqlpass: - file.managed: - - name: /opt/so/conf/mysql/etc/mypass - - source: salt://mysql/etc/mypass - - user: 939 - - group: 939 - - template: jinja - - defaults: - MYSQLPASS: {{ MYSQLPASS }} - -mysqllogdir: - file.directory: - - name: /opt/so/log/mysql - - user: 939 - - group: 939 - - makedirs: True - -mysqldatadir: - file.directory: - - name: /nsm/mysql - - user: 939 - - group: 939 - - makedirs: True - -mysql_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://mysql/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -#mysql_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://mysql/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/mysql/defaults.yaml b/salt/mysql/defaults.yaml deleted file mode 100644 index 87d8cef25..000000000 --- a/salt/mysql/defaults.yaml +++ /dev/null @@ -1,2 +0,0 @@ -mysql: - enabled: False diff --git a/salt/mysql/disabled.sls b/salt/mysql/disabled.sls deleted file mode 100644 index 805a755e4..000000000 --- a/salt/mysql/disabled.sls +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright Security Onion Solutions LLC 
and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - mysql.sostatus - -so-mysql: - docker_container.absent: - - force: True - -so-mysql_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-mysql$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/mysql/enabled.sls b/salt/mysql/enabled.sls deleted file mode 100644 index 1e1a3ca1e..000000000 --- a/salt/mysql/enabled.sls +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} - -include: - - mysql.config - - mysql.sostatus - -{% if MYSQLPASS == None %} - -mysql_password_none: - test.configurable_test_state: - - changes: False - - result: False - - comment: "MySQL Password Error - Not Starting MySQL" - -{% else %} - -so-mysql: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }} - - hostname: so-mysql - - user: socore - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-mysql'].ip }} - - extra_hosts: - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-mysql'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - - port_bindings: - {% for BINDING in DOCKER.containers['so-mysql'].port_bindings %} - - {{ BINDING }} - {% endfor %} - - environment: - - MYSQL_ROOT_HOST={{ GLOBALS.so_docker_gateway }} - - MYSQL_ROOT_PASSWORD=/etc/mypass - {% if DOCKER.containers['so-mysql'].extra_env %} - {% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - binds: - - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro - - /opt/so/conf/mysql/etc/mypass:/etc/mypass - - /nsm/mysql:/var/lib/mysql:rw - - /opt/so/log/mysql:/var/log/mysql:rw - {% if DOCKER.containers['so-mysql'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - cap_add: - - SYS_NICE - - watch: - - file: mysqlcnf - - file: mysqlpass - - require: - - file: mysqlcnf - - file: mysqlpass -{% endif %} - -delete_so-mysql_so-status.disabled: - file.uncomment: - - name: 
/opt/so/conf/so-status/so-status.conf - - regex: ^so-mysql$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/mysql/etc/my.cnf b/salt/mysql/etc/my.cnf deleted file mode 100644 index 621ce83d8..000000000 --- a/salt/mysql/etc/my.cnf +++ /dev/null @@ -1,32 +0,0 @@ -# For advice on how to change settings please see -# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html - -[mysqld] -# -# Remove leading # and set to the amount of RAM for the most important data -# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%. -# innodb_buffer_pool_size = 128M -# -# Remove leading # to turn on a very important data integrity option: logging -# changes to the binary log between backups. -# log_bin -# -# Remove leading # to set options mainly useful for reporting servers. -# The server defaults are faster for transactions and fast SELECTs. -# Adjust sizes as needed, experiment to find the optimal values. -# join_buffer_size = 128M -# sort_buffer_size = 2M -# read_rnd_buffer_size = 2M - -host_cache_size=0 -skip-name-resolve -datadir=/var/lib/mysql -socket=/var/lib/mysql/mysql.sock -secure-file-priv=/var/lib/mysql-files -user=socore - -log-error=/var/log/mysql/mysqld.log -pid-file=/var/run/mysqld/mysqld.pid - -# Switch back to the native password module so that playbook can connect -authentication_policy=mysql_native_password diff --git a/salt/mysql/etc/mypass b/salt/mysql/etc/mypass deleted file mode 100644 index b38bf75ec..000000000 --- a/salt/mysql/etc/mypass +++ /dev/null @@ -1 +0,0 @@ -{{ MYSQLPASS }} diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls deleted file mode 100644 index 48e4f558c..000000000 --- a/salt/mysql/init.sls +++ /dev/null @@ -1,14 +0,0 @@ - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'mysql/map.jinja' import MYSQLMERGED %} - -include: -{% if MYSQLMERGED.enabled %} - - mysql.enabled -{% else %} - - mysql.disabled -{% endif %} diff --git a/salt/mysql/map.jinja b/salt/mysql/map.jinja deleted file mode 100644 index dd9a6474e..000000000 --- a/salt/mysql/map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at - https://securityonion.net/license; you may not use this file except in compliance with the - Elastic License 2.0. #} - -{% import_yaml 'mysql/defaults.yaml' as MYSQLDEFAULTS with context %} -{% set MYSQLMERGED = salt['pillar.get']('mysql', MYSQLDEFAULTS.mysql, merge=True) %} diff --git a/salt/mysql/soc_mysql.yaml b/salt/mysql/soc_mysql.yaml deleted file mode 100644 index 4be816d90..000000000 --- a/salt/mysql/soc_mysql.yaml +++ /dev/null @@ -1,4 +0,0 @@ -mysql: - enabled: - description: You can enable or disable MySQL. - advanced: True diff --git a/salt/mysql/sostatus.sls b/salt/mysql/sostatus.sls deleted file mode 100644 index 2f5dbba06..000000000 --- a/salt/mysql/sostatus.sls +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-mysql_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-mysql - - unless: grep -q so-mysql /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/mysql/tools/sbin/so-mysql-restart b/salt/mysql/tools/sbin/so-mysql-restart deleted file mode 100755 index 8c0583232..000000000 --- a/salt/mysql/tools/sbin/so-mysql-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-restart mysql $1 diff --git a/salt/mysql/tools/sbin/so-mysql-start b/salt/mysql/tools/sbin/so-mysql-start deleted file mode 100755 index e68536809..000000000 --- a/salt/mysql/tools/sbin/so-mysql-start +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-start mysql $1 diff --git a/salt/mysql/tools/sbin/so-mysql-stop b/salt/mysql/tools/sbin/so-mysql-stop deleted file mode 100755 index 58f6072f2..000000000 --- a/salt/mysql/tools/sbin/so-mysql-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-stop mysql $1 diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 236f8da7f..52ea68daa 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -277,38 +277,11 @@ http { proxy_set_header X-Forwarded-Proto $scheme; } - location /playbook/ { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ GLOBALS.manager }}:3000/playbook/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - - location /soctopus/ { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ GLOBALS.manager }}:7000/; - proxy_read_timeout 300; - proxy_connect_timeout 300; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } location /kibana/app/soc/ { rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent; } - location /kibana/app/soctopus/ { - rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; - } location /sensoroniagents/ { if ($http_authorization = "") { diff --git a/salt/playbook/automation_user_create.sls b/salt/playbook/automation_user_create.sls deleted file mode 100644 index 49ec2e795..000000000 --- a/salt/playbook/automation_user_create.sls +++ /dev/null @@ -1,19 +0,0 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} - -# This state will create the SecOps Automation user within Playbook - -include: - - playbook - -wait_for_playbook: - cmd.run: - - name: until nc -z {{ GLOBALS.manager }} 3000; do sleep 1; done - - timeout: 300 - -create_user: - 
cmd.script: - - source: salt://playbook/files/automation_user_create.sh - - cwd: /root - - template: jinja - - onchanges: - - cmd: wait_for_playbook diff --git a/salt/playbook/config.sls b/salt/playbook/config.sls deleted file mode 100644 index f4c2cf137..000000000 --- a/salt/playbook/config.sls +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} -{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %} - - -include: - - mysql - -create_playbookdbuser: - mysql_user.present: - - name: playbookdbuser - - password: {{ PLAYBOOKPASS }} - - host: "{{ DOCKER.range.split('/')[0] }}/255.255.255.0" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_playbookdbuser_grants: - mysql_query.run: - - database: playbook - - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.range.split('/')[0] }}/255.255.255.0';" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_updatwebhooks: - mysql_query.run: - - database: playbook - - query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip}}:7000/playbook/webhook' where project_id = 1" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_updatename: - mysql_query.run: - - database: playbook - - 
query: "update custom_fields set name = 'Custom Filter' where id = 21;" - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -query_updatepluginurls: - mysql_query.run: - - database: playbook - - query: |- - update settings set value = - "--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess - project: '1' - convert_url: http://{{ GLOBALS.manager }}:7000/playbook/sigmac - create_url: http://{{ GLOBALS.manager }}:7000/playbook/play" - where id = 43 - - connection_host: {{ GLOBALS.manager }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} - -playbook_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://playbook/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -#playbook_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://playbook/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -playbooklogdir: - file.directory: - - name: /opt/so/log/playbook - - dir_mode: 775 - - user: 939 - - group: 939 - - makedirs: True - -playbookfilesdir: - file.directory: - - name: /opt/so/conf/playbook/redmine-files - - dir_mode: 775 - - user: 939 - - group: 939 - - makedirs: True - -{% if 'idh' in salt['cmd.shell']("ls /opt/so/saltstack/local/pillar/minions/|awk -F'_' {'print $2'}|awk -F'.' 
{'print $1'}").split() %} -idh-plays: - file.recurse: - - name: /opt/so/conf/soctopus/sigma-import - - source: salt://idh/plays - - makedirs: True - cmd.run: - - name: so-playbook-import True - - onchanges: - - file: /opt/so/conf/soctopus/sigma-import -{% endif %} - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/db_init.sls b/salt/playbook/db_init.sls deleted file mode 100644 index 1b2bf7b1a..000000000 --- a/salt/playbook/db_init.sls +++ /dev/null @@ -1,14 +0,0 @@ - -# This state will import the initial default playbook database. -# If there is an existing playbook database, it will be overwritten - no backups are made. - -include: - - mysql - -salt://playbook/files/playbook_db_init.sh: - cmd.script: - - cwd: /root - - template: jinja - -'sleep 5': - cmd.run \ No newline at end of file diff --git a/salt/playbook/defaults.yaml b/salt/playbook/defaults.yaml deleted file mode 100644 index e75ec6a3c..000000000 --- a/salt/playbook/defaults.yaml +++ /dev/null @@ -1,2 +0,0 @@ -playbook: - enabled: False diff --git a/salt/playbook/disabled.sls b/salt/playbook/disabled.sls deleted file mode 100644 index c8c876cfb..000000000 --- a/salt/playbook/disabled.sls +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - playbook.sostatus - -so-playbook: - docker_container.absent: - - force: True - -so-playbook_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-playbook$ - -so-playbook-sync_cron: - cron.absent: - - identifier: so-playbook-sync_cron - - user: root - -so-playbook-ruleupdate_cron: - cron.absent: - - identifier: so-playbook-ruleupdate_cron - - user: root - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/enabled.sls b/salt/playbook/enabled.sls deleted file mode 100644 index e70fec693..000000000 --- a/salt/playbook/enabled.sls +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %} - -include: - - playbook.config - - playbook.sostatus - -{% if PLAYBOOKPASS == None %} - -playbook_password_none: - test.configurable_test_state: - - changes: False - - result: False - - comment: "Playbook MySQL Password Error - Not Starting Playbook" - -{% else %} - -so-playbook: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }} - - hostname: playbook - - name: so-playbook - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }} - - binds: - - /opt/so/conf/playbook/redmine-files:/usr/src/redmine/files:rw - - /opt/so/log/playbook:/playbook/log:rw - {% if DOCKER.containers['so-playbook'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - extra_hosts: - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-playbook'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - - environment: - - REDMINE_DB_MYSQL={{ GLOBALS.manager }} - - REDMINE_DB_DATABASE=playbook - - REDMINE_DB_USERNAME=playbookdbuser - - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }} - {% if DOCKER.containers['so-playbook'].extra_env %} - {% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - port_bindings: - {% for BINDING in DOCKER.containers['so-playbook'].port_bindings %} - - {{ BINDING }} - {% endfor %} - -delete_so-playbook_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-playbook$ - -so-playbook-sync_cron: - 
cron.present: - - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1 - - identifier: so-playbook-sync_cron - - user: root - - minute: '*/5' - -so-playbook-ruleupdate_cron: - cron.present: - - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1 - - identifier: so-playbook-ruleupdate_cron - - user: root - - minute: '1' - - hour: '6' - -{% endif %} - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/files/automation_user_create.sh b/salt/playbook/files/automation_user_create.sh deleted file mode 100644 index 782ce4c88..000000000 --- a/salt/playbook/files/automation_user_create.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%} -# {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %} - -local_salt_dir=/opt/so/saltstack/local - -try_count=6 -interval=10 - -while [[ $try_count -le 6 ]]; do - if docker top "so-playbook" &>/dev/null; then - automation_group=6 - - # Create user and retrieve api_key and user_id from response - mapfile -t automation_res < <( - curl -s --location --request POST 'http://127.0.0.1:3000/playbook/users.json' --user "admin:{{ admin_pass }}" --header 'Content-Type: application/json' --data '{ - "user" : { - "login" : "automation", - "password": "{{ automation_pass }}", - "firstname": "SecOps", - "lastname": "Automation", - "mail": "automation2@localhost.local" - } - }' | jq -r '.user.api_key, .user.id' - ) - - automation_api_key=${automation_res[0]} - automation_user_id=${automation_res[1]} - - # Add user_id from newly created user to Automation group - curl -s --location --request POST "http://127.0.0.1:3000/playbook/groups/${automation_group}/users.json" \ - --user "admin:{{ admin_pass }}" \ - --header 'Content-Type: application/json' \ - --data "{ - \"user_id\" : ${automation_user_id} - }" - - # 
Update the Automation API key in the secrets pillar - so-yaml.py remove $local_salt_dir/pillar/secrets.sls secrets.playbook_automation_api_key - printf '%s\n'\ - " playbook_automation_api_key: $automation_api_key" >> $local_salt_dir/pillar/secrets.sls - exit 0 - fi - ((try_count++)) - sleep "${interval}s" -done - -# Timeout exceeded, exit with non-zero exit code -exit 1 diff --git a/salt/playbook/files/playbook_db_init.sh b/salt/playbook/files/playbook_db_init.sh deleted file mode 100644 index 94aef0a44..000000000 --- a/salt/playbook/files/playbook_db_init.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} -# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) %} -. /usr/sbin/so-common - -default_salt_dir=/opt/so/saltstack/default - -# Generate salt + hash for admin user -admin_salt=$(get_random_value 32) -admin_stage1_hash=$(echo -n '{{ admin_pass }}' | sha1sum | awk '{print $1}') -admin_hash=$(echo -n "${admin_salt}${admin_stage1_hash}" | sha1sum | awk '{print $1}') -sed -i "s/ADMIN_HASH/${admin_hash}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql -sed -i "s/ADMIN_SALT/${admin_salt}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql - -# Copy file to destination + execute SQL -docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql -docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql" diff --git a/salt/playbook/files/playbook_db_init.sql b/salt/playbook/files/playbook_db_init.sql deleted file mode 100644 index 7a3b4da68..000000000 --- a/salt/playbook/files/playbook_db_init.sql +++ /dev/null @@ -1,1788 +0,0 @@ --- MySQL dump 10.13 Distrib 5.7.24, for Linux (x86_64) --- --- Host: localhost Database: playbook --- ------------------------------------------------------ --- Server version 5.7.24 - -/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT 
*/; -/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; -/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; -/*!40101 SET NAMES utf8 */; -/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; -/*!40103 SET TIME_ZONE='+00:00' */; -/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; -/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; -/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; -/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; - --- --- Current Database: `playbook` --- - -CREATE DATABASE /*!32312 IF NOT EXISTS*/ `playbook` /*!40100 DEFAULT CHARACTER SET latin1 */; - -USE `playbook`; - --- --- Table structure for table `ar_internal_metadata` --- - -DROP TABLE IF EXISTS `ar_internal_metadata`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `ar_internal_metadata` ( - `key` varchar(255) NOT NULL, - `value` varchar(255) DEFAULT NULL, - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`key`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `ar_internal_metadata` --- - -LOCK TABLES `ar_internal_metadata` WRITE; -/*!40000 ALTER TABLE `ar_internal_metadata` DISABLE KEYS */; -INSERT INTO `ar_internal_metadata` VALUES ('environment','production','2020-04-26 13:08:38','2020-04-26 13:08:38'); -/*!40000 ALTER TABLE `ar_internal_metadata` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `attachments` --- - -DROP TABLE IF EXISTS `attachments`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `attachments` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `container_id` int(11) DEFAULT NULL, - `container_type` varchar(30) DEFAULT NULL, - `filename` varchar(255) NOT NULL DEFAULT '', - `disk_filename` 
varchar(255) NOT NULL DEFAULT '', - `filesize` bigint(20) NOT NULL DEFAULT '0', - `content_type` varchar(255) DEFAULT '', - `digest` varchar(64) NOT NULL DEFAULT '', - `downloads` int(11) NOT NULL DEFAULT '0', - `author_id` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `description` varchar(255) DEFAULT NULL, - `disk_directory` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_attachments_on_author_id` (`author_id`), - KEY `index_attachments_on_created_on` (`created_on`), - KEY `index_attachments_on_container_id_and_container_type` (`container_id`,`container_type`), - KEY `index_attachments_on_disk_filename` (`disk_filename`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `attachments` --- - -LOCK TABLES `attachments` WRITE; -/*!40000 ALTER TABLE `attachments` DISABLE KEYS */; -/*!40000 ALTER TABLE `attachments` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `auth_sources` --- - -DROP TABLE IF EXISTS `auth_sources`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `auth_sources` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `type` varchar(30) NOT NULL DEFAULT '', - `name` varchar(60) NOT NULL DEFAULT '', - `host` varchar(60) DEFAULT NULL, - `port` int(11) DEFAULT NULL, - `account` varchar(255) DEFAULT NULL, - `account_password` varchar(255) DEFAULT '', - `base_dn` varchar(255) DEFAULT NULL, - `attr_login` varchar(30) DEFAULT NULL, - `attr_firstname` varchar(30) DEFAULT NULL, - `attr_lastname` varchar(30) DEFAULT NULL, - `attr_mail` varchar(30) DEFAULT NULL, - `onthefly_register` tinyint(1) NOT NULL DEFAULT '0', - `tls` tinyint(1) NOT NULL DEFAULT '0', - `filter` text, - `timeout` int(11) DEFAULT NULL, - `verify_peer` tinyint(1) NOT NULL DEFAULT '1', - PRIMARY KEY (`id`), - KEY `index_auth_sources_on_id_and_type` (`id`,`type`) -) ENGINE=InnoDB DEFAULT 
CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `auth_sources` --- - -LOCK TABLES `auth_sources` WRITE; -/*!40000 ALTER TABLE `auth_sources` DISABLE KEYS */; -/*!40000 ALTER TABLE `auth_sources` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `boards` --- - -DROP TABLE IF EXISTS `boards`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `boards` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL, - `name` varchar(255) NOT NULL DEFAULT '', - `description` varchar(255) DEFAULT NULL, - `position` int(11) DEFAULT NULL, - `topics_count` int(11) NOT NULL DEFAULT '0', - `messages_count` int(11) NOT NULL DEFAULT '0', - `last_message_id` int(11) DEFAULT NULL, - `parent_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `boards_project_id` (`project_id`), - KEY `index_boards_on_last_message_id` (`last_message_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `boards` --- - -LOCK TABLES `boards` WRITE; -/*!40000 ALTER TABLE `boards` DISABLE KEYS */; -/*!40000 ALTER TABLE `boards` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changes` --- - -DROP TABLE IF EXISTS `changes`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changes` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `changeset_id` int(11) NOT NULL, - `action` varchar(1) NOT NULL DEFAULT '', - `path` text NOT NULL, - `from_path` text, - `from_revision` varchar(255) DEFAULT NULL, - `revision` varchar(255) DEFAULT NULL, - `branch` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `changesets_changeset_id` (`changeset_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changes` --- - 
-LOCK TABLES `changes` WRITE; -/*!40000 ALTER TABLE `changes` DISABLE KEYS */; -/*!40000 ALTER TABLE `changes` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changeset_parents` --- - -DROP TABLE IF EXISTS `changeset_parents`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changeset_parents` ( - `changeset_id` int(11) NOT NULL, - `parent_id` int(11) NOT NULL, - KEY `changeset_parents_changeset_ids` (`changeset_id`), - KEY `changeset_parents_parent_ids` (`parent_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changeset_parents` --- - -LOCK TABLES `changeset_parents` WRITE; -/*!40000 ALTER TABLE `changeset_parents` DISABLE KEYS */; -/*!40000 ALTER TABLE `changeset_parents` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changesets` --- - -DROP TABLE IF EXISTS `changesets`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changesets` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `repository_id` int(11) NOT NULL, - `revision` varchar(255) NOT NULL, - `committer` varchar(255) DEFAULT NULL, - `committed_on` datetime NOT NULL, - `comments` longtext, - `commit_date` date DEFAULT NULL, - `scmid` varchar(255) DEFAULT NULL, - `user_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `changesets_repos_rev` (`repository_id`,`revision`), - KEY `index_changesets_on_user_id` (`user_id`), - KEY `index_changesets_on_repository_id` (`repository_id`), - KEY `index_changesets_on_committed_on` (`committed_on`), - KEY `changesets_repos_scmid` (`repository_id`,`scmid`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changesets` --- - -LOCK TABLES `changesets` WRITE; -/*!40000 ALTER TABLE `changesets` DISABLE KEYS */; 
-/*!40000 ALTER TABLE `changesets` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `changesets_issues` --- - -DROP TABLE IF EXISTS `changesets_issues`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `changesets_issues` ( - `changeset_id` int(11) NOT NULL, - `issue_id` int(11) NOT NULL, - UNIQUE KEY `changesets_issues_ids` (`changeset_id`,`issue_id`), - KEY `index_changesets_issues_on_issue_id` (`issue_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `changesets_issues` --- - -LOCK TABLES `changesets_issues` WRITE; -/*!40000 ALTER TABLE `changesets_issues` DISABLE KEYS */; -/*!40000 ALTER TABLE `changesets_issues` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `comments` --- - -DROP TABLE IF EXISTS `comments`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `comments` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `commented_type` varchar(30) NOT NULL DEFAULT '', - `commented_id` int(11) NOT NULL DEFAULT '0', - `author_id` int(11) NOT NULL DEFAULT '0', - `content` text, - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `index_comments_on_commented_id_and_commented_type` (`commented_id`,`commented_type`), - KEY `index_comments_on_author_id` (`author_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `comments` --- - -LOCK TABLES `comments` WRITE; -/*!40000 ALTER TABLE `comments` DISABLE KEYS */; -/*!40000 ALTER TABLE `comments` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_field_enumerations` --- - -DROP TABLE IF EXISTS `custom_field_enumerations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client 
= utf8 */; -CREATE TABLE `custom_field_enumerations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `custom_field_id` int(11) NOT NULL, - `name` varchar(255) NOT NULL, - `active` tinyint(1) NOT NULL DEFAULT '1', - `position` int(11) NOT NULL DEFAULT '1', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_field_enumerations` --- - -LOCK TABLES `custom_field_enumerations` WRITE; -/*!40000 ALTER TABLE `custom_field_enumerations` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_field_enumerations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields` --- - -DROP TABLE IF EXISTS `custom_fields`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `type` varchar(30) NOT NULL DEFAULT '', - `name` varchar(30) NOT NULL DEFAULT '', - `field_format` varchar(30) NOT NULL DEFAULT '', - `possible_values` text, - `regexp` varchar(255) DEFAULT '', - `min_length` int(11) DEFAULT NULL, - `max_length` int(11) DEFAULT NULL, - `is_required` tinyint(1) NOT NULL DEFAULT '0', - `is_for_all` tinyint(1) NOT NULL DEFAULT '0', - `is_filter` tinyint(1) NOT NULL DEFAULT '0', - `position` int(11) DEFAULT NULL, - `searchable` tinyint(1) DEFAULT '0', - `default_value` text, - `editable` tinyint(1) DEFAULT '1', - `visible` tinyint(1) NOT NULL DEFAULT '1', - `multiple` tinyint(1) DEFAULT '0', - `format_store` text, - `description` text, - PRIMARY KEY (`id`), - KEY `index_custom_fields_on_id_and_type` (`id`,`type`) -) ENGINE=InnoDB AUTO_INCREMENT=27 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields` --- - -LOCK TABLES `custom_fields` WRITE; -/*!40000 ALTER TABLE `custom_fields` DISABLE KEYS */; -INSERT INTO `custom_fields` VALUES 
(1,'IssueCustomField','Title','string',NULL,'',NULL,NULL,0,1,1,1,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(2,'IssueCustomField','Author','string',NULL,'',NULL,NULL,0,1,1,2,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(3,'IssueCustomField','Objective','text',NULL,'',NULL,NULL,0,1,1,14,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nfull_width_layout: \'1\'\n',''),(4,'IssueCustomField','Operational Notes','text',NULL,'',NULL,NULL,0,1,0,15,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(5,'IssueCustomField','Result Analysis','text',NULL,'',NULL,NULL,0,1,0,16,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(6,'IssueCustomField','ElastAlert Config','text',NULL,'',NULL,NULL,0,1,0,17,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(7,'IssueCustomField','HiveID','string',NULL,'',NULL,NULL,0,1,1,13,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(8,'IssueCustomField','References','text',NULL,'',NULL,NULL,0,1,0,6,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'0\'\n',''),(9,'IssueCustomField','Sigma','text',NULL,'',NULL,NULL,0,1,0,18,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(10,'IssueCustomField','Level','list','---\n- low\n- medium\n- high\n- critical\n','',NULL,NULL,0,1,1,3,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: 
\'\'\n',''),(11,'IssueCustomField','PlayID','string',NULL,'',NULL,NULL,0,1,1,8,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(12,'IssueCustomField','Rule ID','string',NULL,'',NULL,NULL,0,1,1,9,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(13,'IssueCustomField','Playbook','list','---\n- Internal\n- imported\n- community\n','',NULL,NULL,0,1,1,4,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(15,'IssueCustomField','ATT&CK Technique','list','---\n- T1001\n- T1002\n- T1003\n- T1004\n- T1005\n- T1006\n- T1007\n- T1008\n- T1009\n- T1010\n- T1011\n- T1012\n- T1013\n- T1014\n- T1015\n- T1016\n- T1017\n- T1018\n- T1019\n- T1020\n- T1021\n- T1022\n- T1023\n- T1024\n- T1025\n- T1026\n- T1027\n- T1028\n- T1029\n- T1030\n- T1031\n- T1032\n- T1033\n- T1034\n- T1035\n- T1036\n- T1037\n- T1038\n- T1039\n- T1040\n- T1041\n- T1042\n- T1043\n- T1044\n- T1045\n- T1046\n- T1047\n- T1048\n- T1049\n- T1050\n- T1051\n- T1052\n- T1053\n- T1054\n- T1055\n- T1056\n- T1057\n- T1058\n- T1059\n- T1060\n- T1061\n- T1062\n- T1063\n- T1064\n- T1065\n- T1066\n- T1067\n- T1068\n- T1069\n- T1070\n- T1071\n- T1072\n- T1073\n- T1074\n- T1075\n- T1076\n- T1077\n- T1078\n- T1079\n- T1080\n- T1081\n- T1082\n- T1083\n- T1084\n- T1085\n- T1086\n- T1087\n- T1088\n- T1089\n- T1090\n- T1091\n- T1092\n- T1093\n- T1094\n- T1095\n- T1096\n- T1097\n- T1098\n- T1099\n- T1100\n- T1101\n- T1102\n- T1103\n- T1104\n- T1105\n- T1106\n- T1107\n- T1108\n- T1109\n- T1110\n- T1111\n- T1112\n- T1113\n- T1114\n- T1115\n- T1116\n- T1117\n- T1118\n- T1119\n- T1120\n- T1121\n- T1122\n- T1123\n- T1124\n- T1125\n- T1126\n- T1127\n- T1128\n- T1129\n- T1130\n- T1131\n- T1132\n- T1133\n- T1134\n- T1135\n- T1136\n- T1137\n- T1138\n- T1139\n- T1140\n- T1141\n- T1142\n- T1143\n- T1144\n- T1145\n- T1146\n- T1147\n- T1148\n- 
T1149\n- T1150\n- T1151\n- T1152\n- T1153\n- T1154\n- T1155\n- T1156\n- T1157\n- T1158\n- T1159\n- T1160\n- T1161\n- T1162\n- T1163\n- T1164\n- T1165\n- T1166\n- T1167\n- T1168\n- T1169\n- T1170\n- T1171\n- T1172\n- T1173\n- T1174\n- T1175\n- T1176\n- T1177\n- T1178\n- T1179\n- T1180\n- T1181\n- T1182\n- T1183\n- T1184\n- T1185\n- T1186\n- T1187\n- T1188\n- T1189\n- T1190\n- T1191\n- T1192\n- T1193\n- T1194\n- T1195\n- T1196\n- T1197\n- T1198\n- T1199\n- T1200\n- T1201\n- T1202\n- T1203\n- T1204\n- T1205\n- T1206\n- T1207\n- T1208\n- T1209\n- T1210\n- T1211\n- T1212\n- T1213\n- T1214\n- T1215\n- T1216\n- T1217\n- T1218\n- T1219\n- T1220\n- T1221\n- T1222\n- T1223\n- T1480\n- T1482\n- T1483\n- T1484\n- T1485\n- T1486\n- T1487\n- T1488\n- T1489\n- T1490\n- T1491\n- T1492\n- T1493\n- T1494\n- T1495\n- T1496\n- T1497\n- T1498\n- T1499\n- T1500\n- T1501\n- T1502\n- T1503\n- T1504\n- T1505\n- T1506\n- T1514\n- T1518\n- T1519\n- T1522\n- T1525\n- T1526\n- T1527\n- T1528\n- T1529\n- T1530\n- T1531\n- T1534\n- T1535\n- T1536\n- T1537\n- T1538\n- T1539\n- T1540\n- T1541\n- T1542\n- T1543\n- T1544\n- T1545\n- T1546\n- T1547\n- T1548\n- T1549\n- T1550\n- T1551\n- T1552\n- T1553\n- T1554\n- T1555\n- T1556\n- T1557\n- T1558\n- T1559\n- T1560\n- T1561\n- T1562\n- T1563\n- T1564\n- T1565\n- T1566\n- T1567\n- T1568\n- T1569\n- T1570\n- T1571\n- T1572\n- T1573\n- T1574\n- T1575\n- T1576\n- T1577\n- T1578\n- T1579\n- T1580\n- T1581\n- T1582\n- T1583\n- T1584\n- T1585\n- T1586\n- T1587\n- T1588\n- T1589\n- T1590\n- T1591\n- T1592\n- T1593\n- T1594\n- T1595\n- T1596\n- T1597\n- T1598\n- T1599\n- T1600\n- T1601\n- T1602\n- T1603\n- T1604\n- T1605\n- T1606\n- T1607\n- T1608\n- T1609\n- T1610\n- T1611\n- T1612\n- T1613\n- T1614\n- T1615\n- T1616\n- T1617\n- T1618\n- T1619\n- T1620\n- T1621\n- T1622\n- T1623\n- T1624\n- T1625\n- T1626\n- T1627\n- T1628\n- T1629\n- T1630\n- T1631\n- T1632\n- T1633\n- T1634\n- T1635\n- T1636\n- T1637\n- T1638\n- T1639\n- T1640\n- T1641\n- T1642\n- T1643\n- 
T1644\n- T1645\n- T1646\n- T1647\n- T1648\n- T1649\n- T1650\n- T1651\n- T1652\n- T1653\n- T1654\n- T1655\n- T1656\n- T1657\n- T1658\n- T1659\n- T1660\n- T1661\n- T1662\n- T1663\n- T1664\n- T1665\n- T1666\n- T1667\n- T1668\n- T1669\n- T1670\n- T1671\n- T1672\n- T1673\n- T1674\n- T1675\n- T1676\n- T1677\n- T1678\n- T1679\n- T1680\n- T1681\n- T1682\n- T1683\n- T1684\n- T1685\n- T1686\n- T1687\n- T1688\n- T1689\n- T1690\n- T1691\n- T1692\n- T1693\n- T1694\n- T1695\n- T1696\n- T1697\n- T1698\n- T1699\n- T1700\n- T1701\n- T1702\n- T1703\n- T1704\n- T1705\n- T1706\n- T1707\n- T1708\n- T1709\n- T1710\n- T1711\n- T1712\n- T1713\n- T1714\n- T1715\n- T1716\n- T1717\n- T1718\n- T1719\n- T1720\n- T1721\n- T1722\n- T1723\n- T1724\n- T1725\n- T1726\n- T1727\n- T1728\n- T1729\n- T1730\n- T1731\n- T1732\n- T1733\n- T1734\n- T1735\n- T1736\n- T1737\n- T1738\n- T1739\n- T1740\n- T1741\n- T1742\n- T1743\n- T1744\n- T1745\n- T1746\n- T1747\n- T1748\n- T1749\n- T1750\n- T1751\n- T1752\n','',NULL,NULL,0,1,1,7,0,'',1,1,1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: https://attack.mitre.org/techniques/%value%\nedit_tag_style: \'\'\n',''),(17,'IssueCustomField','Case Analyzers','list','---\n- Urlscan_io_Search - ip,domain,hash,url\n- CERTatPassiveDNS - domain,fqdn,ip\n','',NULL,NULL,0,1,1,12,1,'',1,1,1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(18,'IssueCustomField','Ruleset','string',NULL,'',NULL,NULL,0,1,1,10,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(19,'IssueCustomField','Group','string',NULL,'',NULL,NULL,0,1,1,11,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: \'\'\n',''),(20,'IssueCustomField','Product','string',NULL,'',NULL,NULL,0,1,1,5,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: \'\'\nurl_pattern: 
\'\'\n',''),(21,'IssueCustomField','Target Log','text',NULL,'',NULL,NULL,0,1,0,19,0,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\ntext_formatting: full\nfull_width_layout: \'1\'\n',''),(22,'IssueCustomField','Unit Test','list','---\n- Passed\n- Failed\n','',NULL,NULL,0,1,1,20,1,'',1,1,0,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: \'\'\nedit_tag_style: \'\'\n',''),(26,'IssueCustomField','License','list','---\n- Apache-2.0\n- BSD-2-Clause\n- BSD-3-Clause\n- CC0-1.0\n- CC-PDDC\n- DRL-1.0\n- LGPL-3.0-only\n- MIT License\n- GPL-2.0-only\n- GPL-3.0-only\n','',NULL,NULL,0,1,0,21,0,'',1,1,1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nurl_pattern: https://spdx.org/licenses/%value%.html\nedit_tag_style: \'\'\n',''); -/*!40000 ALTER TABLE `custom_fields` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields_projects` --- - -DROP TABLE IF EXISTS `custom_fields_projects`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields_projects` ( - `custom_field_id` int(11) NOT NULL DEFAULT '0', - `project_id` int(11) NOT NULL DEFAULT '0', - UNIQUE KEY `index_custom_fields_projects_on_custom_field_id_and_project_id` (`custom_field_id`,`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields_projects` --- - -LOCK TABLES `custom_fields_projects` WRITE; -/*!40000 ALTER TABLE `custom_fields_projects` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_fields_projects` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields_roles` --- - -DROP TABLE IF EXISTS `custom_fields_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields_roles` ( - `custom_field_id` int(11) NOT NULL, - `role_id` int(11) NOT NULL, - UNIQUE 
KEY `custom_fields_roles_ids` (`custom_field_id`,`role_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields_roles` --- - -LOCK TABLES `custom_fields_roles` WRITE; -/*!40000 ALTER TABLE `custom_fields_roles` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_fields_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_fields_trackers` --- - -DROP TABLE IF EXISTS `custom_fields_trackers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_fields_trackers` ( - `custom_field_id` int(11) NOT NULL DEFAULT '0', - `tracker_id` int(11) NOT NULL DEFAULT '0', - UNIQUE KEY `index_custom_fields_trackers_on_custom_field_id_and_tracker_id` (`custom_field_id`,`tracker_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_fields_trackers` --- - -LOCK TABLES `custom_fields_trackers` WRITE; -/*!40000 ALTER TABLE `custom_fields_trackers` DISABLE KEYS */; -INSERT INTO `custom_fields_trackers` VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(15,1),(17,1),(18,1),(19,1),(20,1),(21,1),(22,1),(26,1); -/*!40000 ALTER TABLE `custom_fields_trackers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `custom_values` --- - -DROP TABLE IF EXISTS `custom_values`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `custom_values` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `customized_type` varchar(30) NOT NULL DEFAULT '', - `customized_id` int(11) NOT NULL DEFAULT '0', - `custom_field_id` int(11) NOT NULL DEFAULT '0', - `value` longtext, - PRIMARY KEY (`id`), - KEY `custom_values_customized` (`customized_type`,`customized_id`), - KEY `index_custom_values_on_custom_field_id` 
(`custom_field_id`) -) ENGINE=InnoDB AUTO_INCREMENT=145325 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `custom_values` --- - -LOCK TABLES `custom_values` WRITE; -/*!40000 ALTER TABLE `custom_values` DISABLE KEYS */; -/*!40000 ALTER TABLE `custom_values` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `documents` --- - -DROP TABLE IF EXISTS `documents`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `documents` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `category_id` int(11) NOT NULL DEFAULT '0', - `title` varchar(255) NOT NULL DEFAULT '', - `description` text, - `created_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `documents_project_id` (`project_id`), - KEY `index_documents_on_category_id` (`category_id`), - KEY `index_documents_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `documents` --- - -LOCK TABLES `documents` WRITE; -/*!40000 ALTER TABLE `documents` DISABLE KEYS */; -/*!40000 ALTER TABLE `documents` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `email_addresses` --- - -DROP TABLE IF EXISTS `email_addresses`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `email_addresses` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL, - `address` varchar(255) NOT NULL, - `is_default` tinyint(1) NOT NULL DEFAULT '0', - `notify` tinyint(1) NOT NULL DEFAULT '1', - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `index_email_addresses_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- 
--- Dumping data for table `email_addresses` --- - -LOCK TABLES `email_addresses` WRITE; -/*!40000 ALTER TABLE `email_addresses` DISABLE KEYS */; -INSERT INTO `email_addresses` VALUES (1,1,'admin@example.net',1,1,'2020-04-26 13:08:38','2020-04-26 13:08:38'),(3,9,'automation@localhost.local',1,1,'2020-04-26 18:47:46','2020-04-26 18:47:46'); -/*!40000 ALTER TABLE `email_addresses` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `enabled_modules` --- - -DROP TABLE IF EXISTS `enabled_modules`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `enabled_modules` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) DEFAULT NULL, - `name` varchar(255) NOT NULL, - PRIMARY KEY (`id`), - KEY `enabled_modules_project_id` (`project_id`) -) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `enabled_modules` --- - -LOCK TABLES `enabled_modules` WRITE; -/*!40000 ALTER TABLE `enabled_modules` DISABLE KEYS */; -INSERT INTO `enabled_modules` VALUES (1,1,'sigma_editor'),(2,1,'issue_tracking'); -/*!40000 ALTER TABLE `enabled_modules` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `enumerations` --- - -DROP TABLE IF EXISTS `enumerations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `enumerations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(30) NOT NULL DEFAULT '', - `position` int(11) DEFAULT NULL, - `is_default` tinyint(1) NOT NULL DEFAULT '0', - `type` varchar(255) DEFAULT NULL, - `active` tinyint(1) NOT NULL DEFAULT '1', - `project_id` int(11) DEFAULT NULL, - `parent_id` int(11) DEFAULT NULL, - `position_name` varchar(30) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_enumerations_on_project_id` (`project_id`), - KEY `index_enumerations_on_id_and_type` (`id`,`type`) -) 
ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `enumerations` --- - -LOCK TABLES `enumerations` WRITE; -/*!40000 ALTER TABLE `enumerations` DISABLE KEYS */; -INSERT INTO `enumerations` VALUES (1,'Normal',1,1,'IssuePriority',1,NULL,NULL,'default'); -/*!40000 ALTER TABLE `enumerations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `groups_users` --- - -DROP TABLE IF EXISTS `groups_users`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `groups_users` ( - `group_id` int(11) NOT NULL, - `user_id` int(11) NOT NULL, - UNIQUE KEY `groups_users_ids` (`group_id`,`user_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `groups_users` --- - -LOCK TABLES `groups_users` WRITE; -/*!40000 ALTER TABLE `groups_users` DISABLE KEYS */; -INSERT INTO `groups_users` VALUES (7,1); -/*!40000 ALTER TABLE `groups_users` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `import_items` --- - -DROP TABLE IF EXISTS `import_items`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `import_items` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `import_id` int(11) NOT NULL, - `position` int(11) NOT NULL, - `obj_id` int(11) DEFAULT NULL, - `message` text, - `unique_id` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_import_items_on_import_id_and_unique_id` (`import_id`,`unique_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `import_items` --- - -LOCK TABLES `import_items` WRITE; -/*!40000 ALTER TABLE `import_items` DISABLE KEYS */; -/*!40000 ALTER TABLE `import_items` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table 
`imports` --- - -DROP TABLE IF EXISTS `imports`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `imports` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `type` varchar(255) DEFAULT NULL, - `user_id` int(11) NOT NULL, - `filename` varchar(255) DEFAULT NULL, - `settings` text, - `total_items` int(11) DEFAULT NULL, - `finished` tinyint(1) NOT NULL DEFAULT '0', - `created_at` datetime NOT NULL, - `updated_at` datetime NOT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `imports` --- - -LOCK TABLES `imports` WRITE; -/*!40000 ALTER TABLE `imports` DISABLE KEYS */; -/*!40000 ALTER TABLE `imports` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issue_categories` --- - -DROP TABLE IF EXISTS `issue_categories`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issue_categories` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `name` varchar(60) NOT NULL DEFAULT '', - `assigned_to_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `issue_categories_project_id` (`project_id`), - KEY `index_issue_categories_on_assigned_to_id` (`assigned_to_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issue_categories` --- - -LOCK TABLES `issue_categories` WRITE; -/*!40000 ALTER TABLE `issue_categories` DISABLE KEYS */; -/*!40000 ALTER TABLE `issue_categories` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issue_relations` --- - -DROP TABLE IF EXISTS `issue_relations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issue_relations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `issue_from_id` int(11) 
NOT NULL, - `issue_to_id` int(11) NOT NULL, - `relation_type` varchar(255) NOT NULL DEFAULT '', - `delay` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `index_issue_relations_on_issue_from_id_and_issue_to_id` (`issue_from_id`,`issue_to_id`), - KEY `index_issue_relations_on_issue_from_id` (`issue_from_id`), - KEY `index_issue_relations_on_issue_to_id` (`issue_to_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issue_relations` --- - -LOCK TABLES `issue_relations` WRITE; -/*!40000 ALTER TABLE `issue_relations` DISABLE KEYS */; -/*!40000 ALTER TABLE `issue_relations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issue_statuses` --- - -DROP TABLE IF EXISTS `issue_statuses`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issue_statuses` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(30) NOT NULL DEFAULT '', - `is_closed` tinyint(1) NOT NULL DEFAULT '0', - `position` int(11) DEFAULT NULL, - `default_done_ratio` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_issue_statuses_on_position` (`position`), - KEY `index_issue_statuses_on_is_closed` (`is_closed`) -) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issue_statuses` --- - -LOCK TABLES `issue_statuses` WRITE; -/*!40000 ALTER TABLE `issue_statuses` DISABLE KEYS */; -INSERT INTO `issue_statuses` VALUES (2,'Draft',0,1,NULL),(3,'Active',0,2,NULL),(4,'Inactive',0,3,NULL),(5,'Archived',0,4,NULL),(6,'Disabled',0,5,NULL); -/*!40000 ALTER TABLE `issue_statuses` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `issues` --- - -DROP TABLE IF EXISTS `issues`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `issues` ( - `id` 
int(11) NOT NULL AUTO_INCREMENT, - `tracker_id` int(11) NOT NULL, - `project_id` int(11) NOT NULL, - `subject` varchar(255) NOT NULL DEFAULT '', - `description` longtext, - `due_date` date DEFAULT NULL, - `category_id` int(11) DEFAULT NULL, - `status_id` int(11) NOT NULL, - `assigned_to_id` int(11) DEFAULT NULL, - `priority_id` int(11) NOT NULL, - `fixed_version_id` int(11) DEFAULT NULL, - `author_id` int(11) NOT NULL, - `lock_version` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `start_date` date DEFAULT NULL, - `done_ratio` int(11) NOT NULL DEFAULT '0', - `estimated_hours` float DEFAULT NULL, - `parent_id` int(11) DEFAULT NULL, - `root_id` int(11) DEFAULT NULL, - `lft` int(11) DEFAULT NULL, - `rgt` int(11) DEFAULT NULL, - `is_private` tinyint(1) NOT NULL DEFAULT '0', - `closed_on` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `issues_project_id` (`project_id`), - KEY `index_issues_on_status_id` (`status_id`), - KEY `index_issues_on_category_id` (`category_id`), - KEY `index_issues_on_assigned_to_id` (`assigned_to_id`), - KEY `index_issues_on_fixed_version_id` (`fixed_version_id`), - KEY `index_issues_on_tracker_id` (`tracker_id`), - KEY `index_issues_on_priority_id` (`priority_id`), - KEY `index_issues_on_author_id` (`author_id`), - KEY `index_issues_on_created_on` (`created_on`), - KEY `index_issues_on_root_id_and_lft_and_rgt` (`root_id`,`lft`,`rgt`), - KEY `index_issues_on_parent_id` (`parent_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `issues` --- - -LOCK TABLES `issues` WRITE; -/*!40000 ALTER TABLE `issues` DISABLE KEYS */; -/*!40000 ALTER TABLE `issues` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `journal_details` --- - -DROP TABLE IF EXISTS `journal_details`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; 
-CREATE TABLE `journal_details` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `journal_id` int(11) NOT NULL DEFAULT '0', - `property` varchar(30) NOT NULL DEFAULT '', - `prop_key` varchar(30) NOT NULL DEFAULT '', - `old_value` longtext, - `value` longtext, - PRIMARY KEY (`id`), - KEY `journal_details_journal_id` (`journal_id`) -) ENGINE=InnoDB AUTO_INCREMENT=792 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `journal_details` --- - -LOCK TABLES `journal_details` WRITE; -/*!40000 ALTER TABLE `journal_details` DISABLE KEYS */; -/*!40000 ALTER TABLE `journal_details` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `journals` --- - -DROP TABLE IF EXISTS `journals`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `journals` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `journalized_id` int(11) NOT NULL DEFAULT '0', - `journalized_type` varchar(30) NOT NULL DEFAULT '', - `user_id` int(11) NOT NULL DEFAULT '0', - `notes` longtext, - `created_on` datetime NOT NULL, - `private_notes` tinyint(1) NOT NULL DEFAULT '0', - PRIMARY KEY (`id`), - KEY `journals_journalized_id` (`journalized_id`,`journalized_type`), - KEY `index_journals_on_user_id` (`user_id`), - KEY `index_journals_on_journalized_id` (`journalized_id`), - KEY `index_journals_on_created_on` (`created_on`) -) ENGINE=InnoDB AUTO_INCREMENT=9502 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `journals` --- - -LOCK TABLES `journals` WRITE; -/*!40000 ALTER TABLE `journals` DISABLE KEYS */; -/*!40000 ALTER TABLE `journals` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `member_roles` --- - -DROP TABLE IF EXISTS `member_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `member_roles` ( - `id` int(11) NOT 
NULL AUTO_INCREMENT, - `member_id` int(11) NOT NULL, - `role_id` int(11) NOT NULL, - `inherited_from` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_member_roles_on_member_id` (`member_id`), - KEY `index_member_roles_on_role_id` (`role_id`), - KEY `index_member_roles_on_inherited_from` (`inherited_from`) -) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `member_roles` --- - -LOCK TABLES `member_roles` WRITE; -/*!40000 ALTER TABLE `member_roles` DISABLE KEYS */; -INSERT INTO `member_roles` VALUES (1,1,5,NULL),(2,2,3,NULL),(3,3,4,NULL),(4,4,5,1),(7,7,4,3); -/*!40000 ALTER TABLE `member_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `members` --- - -DROP TABLE IF EXISTS `members`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `members` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL DEFAULT '0', - `project_id` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `mail_notification` tinyint(1) NOT NULL DEFAULT '0', - PRIMARY KEY (`id`), - UNIQUE KEY `index_members_on_user_id_and_project_id` (`user_id`,`project_id`), - KEY `index_members_on_user_id` (`user_id`), - KEY `index_members_on_project_id` (`project_id`) -) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `members` --- - -LOCK TABLES `members` WRITE; -/*!40000 ALTER TABLE `members` DISABLE KEYS */; -INSERT INTO `members` VALUES (1,6,1,'2020-04-26 18:44:14',0),(2,5,1,'2020-04-26 18:44:23',0),(3,7,1,'2020-04-26 18:45:27',0),(4,9,1,'2020-04-26 18:47:51',0),(7,1,1,'2020-05-01 16:42:56',0); -/*!40000 ALTER TABLE `members` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `messages` --- - -DROP TABLE IF EXISTS `messages`; -/*!40101 SET @saved_cs_client 
= @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `messages` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `board_id` int(11) NOT NULL, - `parent_id` int(11) DEFAULT NULL, - `subject` varchar(255) NOT NULL DEFAULT '', - `content` text, - `author_id` int(11) DEFAULT NULL, - `replies_count` int(11) NOT NULL DEFAULT '0', - `last_reply_id` int(11) DEFAULT NULL, - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - `locked` tinyint(1) DEFAULT '0', - `sticky` int(11) DEFAULT '0', - PRIMARY KEY (`id`), - KEY `messages_board_id` (`board_id`), - KEY `messages_parent_id` (`parent_id`), - KEY `index_messages_on_last_reply_id` (`last_reply_id`), - KEY `index_messages_on_author_id` (`author_id`), - KEY `index_messages_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `messages` --- - -LOCK TABLES `messages` WRITE; -/*!40000 ALTER TABLE `messages` DISABLE KEYS */; -/*!40000 ALTER TABLE `messages` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `news` --- - -DROP TABLE IF EXISTS `news`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `news` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) DEFAULT NULL, - `title` varchar(60) NOT NULL DEFAULT '', - `summary` varchar(255) DEFAULT '', - `description` text, - `author_id` int(11) NOT NULL DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - `comments_count` int(11) NOT NULL DEFAULT '0', - PRIMARY KEY (`id`), - KEY `news_project_id` (`project_id`), - KEY `index_news_on_author_id` (`author_id`), - KEY `index_news_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `news` --- - -LOCK TABLES `news` WRITE; -/*!40000 ALTER TABLE `news` DISABLE KEYS */; 
-/*!40000 ALTER TABLE `news` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `open_id_authentication_associations` --- - -DROP TABLE IF EXISTS `open_id_authentication_associations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `open_id_authentication_associations` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `issued` int(11) DEFAULT NULL, - `lifetime` int(11) DEFAULT NULL, - `handle` varchar(255) DEFAULT NULL, - `assoc_type` varchar(255) DEFAULT NULL, - `server_url` blob, - `secret` blob, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `open_id_authentication_associations` --- - -LOCK TABLES `open_id_authentication_associations` WRITE; -/*!40000 ALTER TABLE `open_id_authentication_associations` DISABLE KEYS */; -/*!40000 ALTER TABLE `open_id_authentication_associations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `open_id_authentication_nonces` --- - -DROP TABLE IF EXISTS `open_id_authentication_nonces`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `open_id_authentication_nonces` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `timestamp` int(11) NOT NULL, - `server_url` varchar(255) DEFAULT NULL, - `salt` varchar(255) NOT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `open_id_authentication_nonces` --- - -LOCK TABLES `open_id_authentication_nonces` WRITE; -/*!40000 ALTER TABLE `open_id_authentication_nonces` DISABLE KEYS */; -/*!40000 ALTER TABLE `open_id_authentication_nonces` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `projects` --- - -DROP TABLE IF EXISTS `projects`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 
SET character_set_client = utf8 */; -CREATE TABLE `projects` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(255) NOT NULL DEFAULT '', - `description` text, - `homepage` varchar(255) DEFAULT '', - `is_public` tinyint(1) NOT NULL DEFAULT '1', - `parent_id` int(11) DEFAULT NULL, - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `identifier` varchar(255) DEFAULT NULL, - `status` int(11) NOT NULL DEFAULT '1', - `lft` int(11) DEFAULT NULL, - `rgt` int(11) DEFAULT NULL, - `inherit_members` tinyint(1) NOT NULL DEFAULT '0', - `default_version_id` int(11) DEFAULT NULL, - `default_assigned_to_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_projects_on_lft` (`lft`), - KEY `index_projects_on_rgt` (`rgt`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `projects` --- - -LOCK TABLES `projects` WRITE; -/*!40000 ALTER TABLE `projects` DISABLE KEYS */; -INSERT INTO `projects` VALUES (1,'Detection Playbooks','','',1,NULL,'2020-04-26 13:13:01','2020-07-10 19:33:53','detection-playbooks',1,1,2,0,NULL,NULL); -/*!40000 ALTER TABLE `projects` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `projects_trackers` --- - -DROP TABLE IF EXISTS `projects_trackers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `projects_trackers` ( - `project_id` int(11) NOT NULL DEFAULT '0', - `tracker_id` int(11) NOT NULL DEFAULT '0', - UNIQUE KEY `projects_trackers_unique` (`project_id`,`tracker_id`), - KEY `projects_trackers_project_id` (`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `projects_trackers` --- - -LOCK TABLES `projects_trackers` WRITE; -/*!40000 ALTER TABLE `projects_trackers` DISABLE KEYS */; -INSERT INTO `projects_trackers` VALUES (1,1); 
-/*!40000 ALTER TABLE `projects_trackers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `queries` --- - -DROP TABLE IF EXISTS `queries`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `queries` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) DEFAULT NULL, - `name` varchar(255) NOT NULL DEFAULT '', - `filters` text, - `user_id` int(11) NOT NULL DEFAULT '0', - `column_names` text, - `sort_criteria` text, - `group_by` varchar(255) DEFAULT NULL, - `type` varchar(255) DEFAULT NULL, - `visibility` int(11) DEFAULT '0', - `options` text, - PRIMARY KEY (`id`), - KEY `index_queries_on_project_id` (`project_id`), - KEY `index_queries_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `queries` --- - -LOCK TABLES `queries` WRITE; -/*!40000 ALTER TABLE `queries` DISABLE KEYS */; -INSERT INTO `queries` VALUES (3,1,'All Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(4,NULL,'Inactive Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'4\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(5,NULL,'Draft Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'2\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(6,NULL,'Playbook - Community Sigma','---\ncf_13:\n :operator: \"=\"\n :values:\n - community\n',1,'---\n- :status\n- :cf_10\n- :cf_18\n- :cf_19\n- :cf_20\n- :cf_1\n- :updated_on\n','---\n- - id\n - 
desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(8,NULL,'Playbook - Internal','---\ncf_13:\n :operator: \"=\"\n :values:\n - Internal\n',1,'---\n- :status\n- :cf_10\n- :cf_14\n- :cf_16\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(9,NULL,'Active Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\nstatus_id:\n :operator: \"=\"\n :values:\n - \'3\'\n',1,'---\n- :status\n- :cf_10\n- :cf_13\n- :cf_18\n- :cf_19\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'); -/*!40000 ALTER TABLE `queries` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `queries_roles` --- - -DROP TABLE IF EXISTS `queries_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `queries_roles` ( - `query_id` int(11) NOT NULL, - `role_id` int(11) NOT NULL, - UNIQUE KEY `queries_roles_ids` (`query_id`,`role_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `queries_roles` --- - -LOCK TABLES `queries_roles` WRITE; -/*!40000 ALTER TABLE `queries_roles` DISABLE KEYS */; -/*!40000 ALTER TABLE `queries_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `repositories` --- - -DROP TABLE IF EXISTS `repositories`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `repositories` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `url` varchar(255) NOT NULL DEFAULT '', - `login` varchar(60) DEFAULT '', - `password` 
varchar(255) DEFAULT '', - `root_url` varchar(255) DEFAULT '', - `type` varchar(255) DEFAULT NULL, - `path_encoding` varchar(64) DEFAULT NULL, - `log_encoding` varchar(64) DEFAULT NULL, - `extra_info` longtext, - `identifier` varchar(255) DEFAULT NULL, - `is_default` tinyint(1) DEFAULT '0', - `created_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_repositories_on_project_id` (`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `repositories` --- - -LOCK TABLES `repositories` WRITE; -/*!40000 ALTER TABLE `repositories` DISABLE KEYS */; -/*!40000 ALTER TABLE `repositories` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `roles` --- - -DROP TABLE IF EXISTS `roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `roles` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(255) NOT NULL DEFAULT '', - `position` int(11) DEFAULT NULL, - `assignable` tinyint(1) DEFAULT '1', - `builtin` int(11) NOT NULL DEFAULT '0', - `permissions` text, - `issues_visibility` varchar(30) NOT NULL DEFAULT 'default', - `users_visibility` varchar(30) NOT NULL DEFAULT 'all', - `time_entries_visibility` varchar(30) NOT NULL DEFAULT 'all', - `all_roles_managed` tinyint(1) NOT NULL DEFAULT '1', - `settings` text, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `roles` --- - -LOCK TABLES `roles` WRITE; -/*!40000 ALTER TABLE `roles` DISABLE KEYS */; -INSERT INTO `roles` VALUES (1,'Non member',0,1,1,NULL,'default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'0\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: 
\'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n'),(2,'Anonymous',0,1,2,'---\n- :view_issues\n- :edit_issues\n- :add_issue_notes\n- :sigma_editor\n','default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n'),(3,'Security-Analyst',1,0,0,'---\n- :save_queries\n- :view_issues\n- :edit_issues\n- :add_issue_notes\n- :edit_issue_notes\n- :sigma_editor\n','all','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\n delete_issues: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n delete_issues: []\n'),(4,'SuperAdmin',2,0,0,'---\n- :add_project\n- :edit_project\n- :close_project\n- :select_project_modules\n- :manage_members\n- :manage_versions\n- :add_subprojects\n- :manage_public_queries\n- :save_queries\n- :manage_hook\n- :view_messages\n- :add_messages\n- :edit_messages\n- :edit_own_messages\n- :delete_messages\n- :delete_own_messages\n- :manage_boards\n- :view_calendar\n- :view_documents\n- :add_documents\n- :edit_documents\n- :delete_documents\n- :view_files\n- :manage_files\n- :view_gantt\n- :view_issues\n- :edit_issues\n- :edit_own_issues\n- :copy_issues\n- :manage_issue_relations\n- :manage_subtasks\n- :set_issues_private\n- :set_own_issues_private\n- :add_issue_notes\n- :edit_issue_notes\n- :edit_own_issue_notes\n- 
:view_private_notes\n- :set_notes_private\n- :delete_issues\n- :view_issue_watchers\n- :add_issue_watchers\n- :delete_issue_watchers\n- :import_issues\n- :manage_categories\n- :view_news\n- :manage_news\n- :comment_news\n- :view_changesets\n- :browse_repository\n- :commit_access\n- :manage_related_issues\n- :manage_repository\n- :sigma_editor\n- :view_time_entries\n- :log_time\n- :edit_time_entries\n- :edit_own_time_entries\n- :manage_project_activities\n- :log_time_for_other_users\n- :import_time_entries\n- :view_wiki_pages\n- :view_wiki_edits\n- :export_wiki_pages\n- :edit_wiki_pages\n- :rename_wiki_pages\n- :delete_wiki_pages\n- :delete_wiki_pages_attachments\n- :protect_wiki_pages\n- :manage_wiki\n','default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\n delete_issues: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n delete_issues: []\n'),(5,'Automation',3,0,0,'---\n- :view_issues\n- :add_issues\n- :edit_issues\n- :add_issue_notes\n- :edit_issue_notes\n- :import_issues\n- :sigma_editor\n','default','all','all',1,'--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\npermissions_all_trackers: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: \'1\'\n add_issues: \'1\'\n edit_issues: \'1\'\n add_issue_notes: \'1\'\n delete_issues: \'1\'\npermissions_tracker_ids: !ruby/hash:ActiveSupport::HashWithIndifferentAccess\n view_issues: []\n add_issues: []\n edit_issues: []\n add_issue_notes: []\n delete_issues: []\n'); -/*!40000 ALTER TABLE `roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `roles_managed_roles` --- - -DROP TABLE IF EXISTS `roles_managed_roles`; -/*!40101 SET @saved_cs_client = @@character_set_client */; 
-/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `roles_managed_roles` ( - `role_id` int(11) NOT NULL, - `managed_role_id` int(11) NOT NULL, - UNIQUE KEY `index_roles_managed_roles_on_role_id_and_managed_role_id` (`role_id`,`managed_role_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `roles_managed_roles` --- - -LOCK TABLES `roles_managed_roles` WRITE; -/*!40000 ALTER TABLE `roles_managed_roles` DISABLE KEYS */; -/*!40000 ALTER TABLE `roles_managed_roles` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `schema_migrations` --- - -DROP TABLE IF EXISTS `schema_migrations`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `schema_migrations` ( - `version` varchar(255) NOT NULL, - PRIMARY KEY (`version`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `schema_migrations` --- - -LOCK TABLES `schema_migrations` WRITE; -/*!40000 ALTER TABLE `schema_migrations` DISABLE KEYS */; -INSERT INTO `schema_migrations` VALUES 
('1'),('1-redmine_webhook'),('10'),('100'),('101'),('102'),('103'),('104'),('105'),('106'),('107'),('108'),('11'),('12'),('13'),('14'),('15'),('16'),('17'),('18'),('19'),('2'),('20'),('20090214190337'),('20090312172426'),('20090312194159'),('20090318181151'),('20090323224724'),('20090401221305'),('20090401231134'),('20090403001910'),('20090406161854'),('20090425161243'),('20090503121501'),('20090503121505'),('20090503121510'),('20090614091200'),('20090704172350'),('20090704172355'),('20090704172358'),('20091010093521'),('20091017212227'),('20091017212457'),('20091017212644'),('20091017212938'),('20091017213027'),('20091017213113'),('20091017213151'),('20091017213228'),('20091017213257'),('20091017213332'),('20091017213444'),('20091017213536'),('20091017213642'),('20091017213716'),('20091017213757'),('20091017213835'),('20091017213910'),('20091017214015'),('20091017214107'),('20091017214136'),('20091017214236'),('20091017214308'),('20091017214336'),('20091017214406'),('20091017214440'),('20091017214519'),('20091017214611'),('20091017214644'),('20091017214720'),('20091017214750'),('20091025163651'),('20091108092559'),('20091114105931'),('20091123212029'),('20091205124427'),('20091220183509'),('20091220183727'),('20091220184736'),('20091225164732'),('20091227112908'),('20100129193402'),('20100129193813'),('20100221100219'),('20100313132032'),('20100313171051'),('20100705164950'),('20100819172912'),('20101104182107'),('20101107130441'),('20101114115114'),('20101114115359'),('20110220160626'),('20110223180944'),('20110223180953'),('20110224000000'),('20110226120112'),('20110226120132'),('20110227125750'),('20110228000000'),('20110228000100'),('20110401192910'),('20110408103312'),('20110412065600'),('20110511000000'),('20110902000000'),('20111201201315'),('20120115143024'),('20120115143100'),('20120115143126'),('20120127174243'),('20120205111326'),('20120223110929'),('20120301153455'),('20120422150750'),('20120705074331'),('20120707064544'),('20120714122000'),('2012071412
2100'),('20120714122200'),('20120731164049'),('20120930112914'),('20121026002032'),('20121026003537'),('20121209123234'),('20121209123358'),('20121213084931'),('20130110122628'),('20130201184705'),('20130202090625'),('20130207175206'),('20130207181455'),('20130215073721'),('20130215111127'),('20130215111141'),('20130217094251'),('20130602092539'),('20130710182539'),('20130713104233'),('20130713111657'),('20130729070143'),('20130911193200'),('20131004113137'),('20131005100610'),('20131124175346'),('20131210180802'),('20131214094309'),('20131215104612'),('20131218183023'),('20140228130325'),('20140903143914'),('20140920094058'),('20141029181752'),('20141029181824'),('20141109112308'),('20141122124142'),('20150113194759'),('20150113211532'),('20150113213922'),('20150113213955'),('20150208105930'),('20150510083747'),('20150525103953'),('20150526183158'),('20150528084820'),('20150528092912'),('20150528093249'),('20150725112753'),('20150730122707'),('20150730122735'),('20150921204850'),('20150921210243'),('20151020182334'),('20151020182731'),('20151021184614'),('20151021185456'),('20151021190616'),('20151024082034'),('20151025072118'),('20151031095005'),('20160404080304'),('20160416072926'),('20160529063352'),('20161001122012'),('20161002133421'),('20161010081301'),('20161010081528'),('20161010081600'),('20161126094932'),('20161220091118'),('20170207050700'),('20170302015225'),('20170309214320'),('20170320051650'),('20170418090031'),('20170419144536'),('20170723112801'),('20180501132547'),('20180913072918'),('20180923082945'),('20180923091603'),('20190315094151'),('20190315102101'),('20190510070108'),('20190620135549'),('21'),('22'),('23'),('24'),('25'),('26'),('27'),('28'),('29'),('3'),('30'),('31'),('32'),('33'),('34'),('35'),('36'),('37'),('38'),('39'),('4'),('40'),('41'),('42'),('43'),('44'),('45'),('46'),('47'),('48'),('49'),('5'),('50'),('51'),('52'),('53'),('54'),('55'),('56'),('57'),('58'),('59'),('6'),('60'),('61'),('62'),('63'),('64'),('65'),('66'),('67'),('68')
,('69'),('7'),('70'),('71'),('72'),('73'),('74'),('75'),('76'),('77'),('78'),('79'),('8'),('80'),('81'),('82'),('83'),('84'),('85'),('86'),('87'),('88'),('89'),('9'),('90'),('91'),('92'),('93'),('94'),('95'),('96'),('97'),('98'),('99'); -/*!40000 ALTER TABLE `schema_migrations` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `settings` --- - -DROP TABLE IF EXISTS `settings`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `settings` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(255) NOT NULL DEFAULT '', - `value` text, - `updated_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_settings_on_name` (`name`) -) ENGINE=InnoDB AUTO_INCREMENT=71 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `settings` --- - -LOCK TABLES `settings` WRITE; -/*!40000 ALTER TABLE `settings` DISABLE KEYS */; -INSERT INTO `settings` VALUES (1,'ui_theme','circle','2020-04-26 13:11:26'),(2,'default_language','en','2020-04-26 13:11:26'),(3,'force_default_language_for_anonymous','0','2020-04-26 13:11:26'),(4,'force_default_language_for_loggedin','0','2020-04-26 13:11:26'),(5,'start_of_week','','2020-04-26 13:11:26'),(6,'date_format','','2020-04-26 13:11:26'),(7,'time_format','','2020-04-26 13:11:26'),(8,'timespan_format','decimal','2020-04-26 13:11:26'),(9,'user_format','firstname_lastname','2020-05-02 12:45:00'),(10,'gravatar_enabled','1','2020-05-02 12:41:07'),(11,'thumbnails_enabled','1','2020-04-26 13:11:26'),(12,'thumbnails_size','100','2020-04-26 13:11:26'),(13,'new_item_menu_tab','0','2020-04-26 13:11:30'),(14,'login_required','0','2020-07-10 19:32:45'),(15,'autologin','0','2020-04-26 13:11:54'),(16,'self_registration','0','2020-04-26 13:11:54'),(17,'show_custom_fields_on_registration','0','2020-04-26 13:11:54'),(18,'password_min_length','8','2020-04-26 
13:11:54'),(19,'password_required_char_classes','--- []\n','2020-04-26 13:11:54'),(20,'password_max_age','0','2020-04-26 13:11:54'),(21,'lost_password','1','2020-04-26 13:11:54'),(22,'openid','0','2020-04-26 13:11:55'),(23,'session_lifetime','0','2020-04-26 13:11:55'),(24,'session_timeout','0','2020-04-26 13:11:55'),(25,'rest_api_enabled','1','2020-04-26 13:11:58'),(26,'jsonp_enabled','0','2020-04-26 13:11:58'),(27,'default_projects_public','0','2020-04-26 13:12:21'),(28,'default_projects_modules','---\n- sigma_editor\n','2020-04-26 13:12:21'),(29,'default_projects_tracker_ids','--- []\n','2020-04-26 13:12:21'),(30,'sequential_project_identifiers','0','2020-04-26 13:12:21'),(31,'project_list_defaults','---\n:column_names:\n- name\n- identifier\n- short_description\n','2020-04-26 13:12:21'),(32,'app_title','Playbook','2020-04-26 18:17:51'),(33,'welcome_text','','2020-04-26 18:17:51'),(34,'per_page_options','25,75,150','2020-05-02 12:41:38'),(35,'search_results_per_page','10','2020-04-26 18:17:51'),(36,'activity_days_default','30','2020-04-26 18:17:51'),(37,'host_name','localhost:3000','2020-04-26 18:17:51'),(38,'protocol','http','2020-04-26 18:17:51'),(39,'text_formatting','textile','2020-04-26 18:17:51'),(40,'cache_formatted_text','0','2020-04-26 18:17:51'),(41,'wiki_compression','','2020-04-26 18:17:51'),(42,'feeds_limit','15','2020-04-26 18:17:51'),(43,'plugin_redmine_playbook','--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nproject: \'1\'\nconvert_url: http://10.66.166.135:7000/playbook/sigmac\ncreate_url: http://10.66.166.135:7000/playbook/play','2020-05-02 12:39:20'),(44,'cross_project_issue_relations','0','2020-05-01 16:27:33'),(45,'link_copied_issue','no','2020-05-01 16:27:33'),(46,'cross_project_subtasks','','2020-05-01 16:27:33'),(47,'close_duplicate_issues','0','2020-05-01 16:27:33'),(48,'issue_group_assignment','0','2020-05-01 16:27:33'),(49,'default_issue_start_date_to_creation_date','1','2020-05-01 
16:27:33'),(50,'display_subprojects_issues','0','2020-05-01 16:27:33'),(51,'issue_done_ratio','issue_field','2020-05-01 16:27:33'),(52,'non_working_week_days','---\n- \'6\'\n- \'7\'\n','2020-05-01 16:27:33'),(53,'issues_export_limit','500','2020-05-01 16:27:33'),(54,'gantt_items_limit','500','2020-05-01 16:27:33'),(55,'gantt_months_limit','24','2020-05-01 16:27:33'),(56,'parent_issue_dates','derived','2020-05-01 16:27:33'),(57,'parent_issue_priority','derived','2020-05-01 16:27:33'),(58,'parent_issue_done_ratio','derived','2020-05-01 16:27:33'),(59,'issue_list_default_columns','---\n- status\n- cf_10\n- cf_13\n- cf_14\n- cf_1\n- updated_on\n','2020-05-01 19:32:13'),(60,'issue_list_default_totals','--- []\n','2020-05-01 16:27:33'),(61,'enabled_scm','--- []\n','2020-05-01 16:27:47'),(62,'autofetch_changesets','0','2020-05-01 16:27:47'),(63,'sys_api_enabled','0','2020-05-01 16:27:47'),(64,'repository_log_display_limit','100','2020-05-01 16:27:47'),(65,'commit_logs_formatting','1','2020-05-01 16:27:47'),(66,'commit_ref_keywords','refs,references,IssueID','2020-05-01 16:27:47'),(67,'commit_cross_project_ref','0','2020-05-01 16:27:47'),(68,'commit_logtime_enabled','0','2020-05-01 16:27:47'),(69,'commit_update_keywords','--- []\n','2020-05-01 16:27:47'),(70,'gravatar_default','','2020-05-02 12:41:07'); -/*!40000 ALTER TABLE `settings` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `time_entries` --- - -DROP TABLE IF EXISTS `time_entries`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `time_entries` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL, - `author_id` int(11) DEFAULT NULL, - `user_id` int(11) NOT NULL, - `issue_id` int(11) DEFAULT NULL, - `hours` float NOT NULL, - `comments` varchar(1024) DEFAULT NULL, - `activity_id` int(11) NOT NULL, - `spent_on` date NOT NULL, - `tyear` int(11) NOT NULL, - `tmonth` int(11) NOT NULL, - `tweek` int(11) NOT 
NULL, - `created_on` datetime NOT NULL, - `updated_on` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `time_entries_project_id` (`project_id`), - KEY `time_entries_issue_id` (`issue_id`), - KEY `index_time_entries_on_activity_id` (`activity_id`), - KEY `index_time_entries_on_user_id` (`user_id`), - KEY `index_time_entries_on_created_on` (`created_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `time_entries` --- - -LOCK TABLES `time_entries` WRITE; -/*!40000 ALTER TABLE `time_entries` DISABLE KEYS */; -/*!40000 ALTER TABLE `time_entries` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `tokens` --- - -DROP TABLE IF EXISTS `tokens`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `tokens` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT NULL DEFAULT '0', - `action` varchar(30) NOT NULL DEFAULT '', - `value` varchar(40) NOT NULL DEFAULT '', - `created_on` datetime NOT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `tokens_value` (`value`), - KEY `index_tokens_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=67 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `tokens` --- - -LOCK TABLES `tokens` WRITE; -/*!40000 ALTER TABLE `tokens` DISABLE KEYS */; -INSERT INTO `tokens` - VALUES - (3,1,'feeds','6e5575602e1227c188cd85ef6d12608bb8701193','2020-04-26 13:10:46','2020-04-26 13:10:46'), - (4,1,'session','999412fa9badda7423c6c654d6364c32c20b3eac','2020-04-26 18:07:03','2020-04-26 18:12:02'), - (5,1,'session','124ad4acbf87a942426350e7ad028c1d119c3851','2020-04-26 18:17:11','2020-04-26 18:19:24'), - (9,1,'session','2890c663e0552f26ddb92acad6ab3b6d05b92915','2020-04-26 18:51:15','2020-04-26 18:51:15'), - (19,1,'session','b7ffb106ea0b34650dd9c1770f74c2b0ffe166b2','2020-05-01 
16:52:33','2020-05-01 18:02:30'), - (20,1,'session','f44cfcf918eef59ffda47991c431d9c2b2ac6113','2020-05-01 18:05:56','2020-05-01 18:05:56'), - (23,9,'feeds','211918c9d7168979b5dc19bebb14573b928a5067','2020-05-01 18:26:17','2020-05-01 18:26:17'), - (46,1,'session','2d0c8f8ae641c06d8c2362746846440d465d53c0','2020-05-06 20:48:01','2020-05-06 20:48:07'), - (59,1,'session','2afe6590653d59a697d1436729c64f322a2eff82','2020-07-01 18:11:07','2020-07-01 20:30:43'), - (61,1,'session','b01f95709ca1ab086a049cf9c5afd81ca9d4526e','2020-07-15 16:30:42','2020-07-15 16:31:40'), - (62,1,'session','d29acdcd0b8e4ebf78ef8f696d3e76df7e2ab2ac','2020-08-17 14:51:59','2020-08-17 14:53:22'); -/*!40000 ALTER TABLE `tokens` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `trackers` --- - -DROP TABLE IF EXISTS `trackers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `trackers` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `name` varchar(30) NOT NULL DEFAULT '', - `description` varchar(255) DEFAULT NULL, - `is_in_chlog` tinyint(1) NOT NULL DEFAULT '0', - `position` int(11) DEFAULT NULL, - `is_in_roadmap` tinyint(1) NOT NULL DEFAULT '1', - `fields_bits` int(11) DEFAULT '0', - `default_status_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `trackers` --- - -LOCK TABLES `trackers` WRITE; -/*!40000 ALTER TABLE `trackers` DISABLE KEYS */; -INSERT INTO `trackers` VALUES (1,'Play','',0,1,0,255,2); -/*!40000 ALTER TABLE `trackers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `user_preferences` --- - -DROP TABLE IF EXISTS `user_preferences`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `user_preferences` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `user_id` int(11) NOT 
NULL DEFAULT '0', - `others` text, - `hide_mail` tinyint(1) DEFAULT '1', - `time_zone` varchar(255) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_user_preferences_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `user_preferences` --- - -LOCK TABLES `user_preferences` WRITE; -/*!40000 ALTER TABLE `user_preferences` DISABLE KEYS */; -INSERT INTO `user_preferences` VALUES (1,1,'---\n:no_self_notified: \'1\'\n:my_page_layout:\n left:\n - issuesassignedtome\n right:\n - issuesreportedbyme\n:my_page_settings: {}\n:comments_sorting: asc\n:warn_on_leaving_unsaved: \'1\'\n:textarea_font: \'\'\n:recently_used_projects: 3\n:history_default_tab: notes\n:recently_used_project_ids: \'1\'\n',1,''),(3,9,'---\n:no_self_notified: \'1\'\n:comments_sorting: asc\n:warn_on_leaving_unsaved: \'1\'\n:textarea_font: \'\'\n:recently_used_projects: 3\n:history_default_tab: notes\n:my_page_layout:\n left:\n - issuesassignedtome\n right:\n - issuesreportedbyme\n:my_page_settings: {}\n:recently_used_project_ids: \'1\'\n',1,''); -/*!40000 ALTER TABLE `user_preferences` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `users` --- - -DROP TABLE IF EXISTS `users`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `users` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `login` varchar(255) NOT NULL DEFAULT '', - `hashed_password` varchar(40) NOT NULL DEFAULT '', - `firstname` varchar(30) NOT NULL DEFAULT '', - `lastname` varchar(255) NOT NULL DEFAULT '', - `admin` tinyint(1) NOT NULL DEFAULT '0', - `status` int(11) NOT NULL DEFAULT '1', - `last_login_on` datetime DEFAULT NULL, - `language` varchar(5) DEFAULT '', - `auth_source_id` int(11) DEFAULT NULL, - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `type` varchar(255) DEFAULT NULL, - `identity_url` 
varchar(255) DEFAULT NULL, - `mail_notification` varchar(255) NOT NULL DEFAULT '', - `salt` varchar(64) DEFAULT NULL, - `must_change_passwd` tinyint(1) NOT NULL DEFAULT '0', - `passwd_changed_on` datetime DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `index_users_on_id_and_type` (`id`,`type`), - KEY `index_users_on_auth_source_id` (`auth_source_id`), - KEY `index_users_on_type` (`type`) -) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `users` --- - -LOCK TABLES `users` WRITE; -/*!40000 ALTER TABLE `users` DISABLE KEYS */; -INSERT INTO `users` - VALUES - (1,'admin','ADMIN_HASH','Admin','Admin',1,1,'2020-08-17 18:03:20','',NULL,'2020-04-26 13:08:34','2020-04-26 13:10:45','User',NULL,'all','ADMIN_SALT',0,'2020-04-26 13:10:27'), - (2,'','','','Anonymous users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupAnonymous',NULL,'',NULL,0,NULL), - (3,'','','','Non member users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupNonMember',NULL,'',NULL,0,NULL), - (4,'','','','Anonymous',0,0,NULL,'',NULL,'2020-04-26 13:09:44','2020-04-26 13:09:44','AnonymousUser',NULL,'only_my_events',NULL,0,NULL), - (5,'','','','Analysts',0,1,NULL,'',NULL,'2020-04-26 18:43:40','2020-04-26 18:43:40','Group',NULL,'',NULL,0,NULL), - (6,'','','','Automation',0,1,NULL,'',NULL,'2020-04-26 18:43:47','2020-04-26 18:43:47','Group',NULL,'',NULL,0,NULL), - (7,'','','','Admins',0,1,NULL,'',NULL,'2020-04-26 18:43:58','2020-04-26 18:43:58','Group',NULL,'',NULL,0,NULL) -; -/*!40000 ALTER TABLE `users` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `versions` --- - -DROP TABLE IF EXISTS `versions`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `versions` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL DEFAULT '0', - `name` varchar(255) NOT NULL DEFAULT '', - 
`description` varchar(255) DEFAULT '', - `effective_date` date DEFAULT NULL, - `created_on` timestamp NULL DEFAULT NULL, - `updated_on` timestamp NULL DEFAULT NULL, - `wiki_page_title` varchar(255) DEFAULT NULL, - `status` varchar(255) DEFAULT 'open', - `sharing` varchar(255) NOT NULL DEFAULT 'none', - PRIMARY KEY (`id`), - KEY `versions_project_id` (`project_id`), - KEY `index_versions_on_sharing` (`sharing`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `versions` --- - -LOCK TABLES `versions` WRITE; -/*!40000 ALTER TABLE `versions` DISABLE KEYS */; -/*!40000 ALTER TABLE `versions` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `watchers` --- - -DROP TABLE IF EXISTS `watchers`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `watchers` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `watchable_type` varchar(255) NOT NULL DEFAULT '', - `watchable_id` int(11) NOT NULL DEFAULT '0', - `user_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `watchers_user_id_type` (`user_id`,`watchable_type`), - KEY `index_watchers_on_user_id` (`user_id`), - KEY `index_watchers_on_watchable_id_and_watchable_type` (`watchable_id`,`watchable_type`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `watchers` --- - -LOCK TABLES `watchers` WRITE; -/*!40000 ALTER TABLE `watchers` DISABLE KEYS */; -/*!40000 ALTER TABLE `watchers` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `webhooks` --- - -DROP TABLE IF EXISTS `webhooks`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `webhooks` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `url` varchar(255) DEFAULT NULL, - `project_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB 
AUTO_INCREMENT=2 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `webhooks` --- - -LOCK TABLES `webhooks` WRITE; -/*!40000 ALTER TABLE `webhooks` DISABLE KEYS */; -INSERT INTO `webhooks` VALUES (1,'http://10.66.166.135:7000/playbook/webhook',1); -/*!40000 ALTER TABLE `webhooks` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_content_versions` --- - -DROP TABLE IF EXISTS `wiki_content_versions`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_content_versions` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `wiki_content_id` int(11) NOT NULL, - `page_id` int(11) NOT NULL, - `author_id` int(11) DEFAULT NULL, - `data` longblob, - `compression` varchar(6) DEFAULT '', - `comments` varchar(1024) DEFAULT '', - `updated_on` datetime NOT NULL, - `version` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `wiki_content_versions_wcid` (`wiki_content_id`), - KEY `index_wiki_content_versions_on_updated_on` (`updated_on`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_content_versions` --- - -LOCK TABLES `wiki_content_versions` WRITE; -/*!40000 ALTER TABLE `wiki_content_versions` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_content_versions` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_contents` --- - -DROP TABLE IF EXISTS `wiki_contents`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_contents` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `page_id` int(11) NOT NULL, - `author_id` int(11) DEFAULT NULL, - `text` longtext, - `comments` varchar(1024) DEFAULT '', - `updated_on` datetime NOT NULL, - `version` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `wiki_contents_page_id` (`page_id`), - KEY 
`index_wiki_contents_on_author_id` (`author_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_contents` --- - -LOCK TABLES `wiki_contents` WRITE; -/*!40000 ALTER TABLE `wiki_contents` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_contents` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_pages` --- - -DROP TABLE IF EXISTS `wiki_pages`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_pages` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `wiki_id` int(11) NOT NULL, - `title` varchar(255) NOT NULL, - `created_on` datetime NOT NULL, - `protected` tinyint(1) NOT NULL DEFAULT '0', - `parent_id` int(11) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `wiki_pages_wiki_id_title` (`wiki_id`,`title`), - KEY `index_wiki_pages_on_wiki_id` (`wiki_id`), - KEY `index_wiki_pages_on_parent_id` (`parent_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wiki_pages` --- - -LOCK TABLES `wiki_pages` WRITE; -/*!40000 ALTER TABLE `wiki_pages` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_pages` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wiki_redirects` --- - -DROP TABLE IF EXISTS `wiki_redirects`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wiki_redirects` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `wiki_id` int(11) NOT NULL, - `title` varchar(255) DEFAULT NULL, - `redirects_to` varchar(255) DEFAULT NULL, - `created_on` datetime NOT NULL, - `redirects_to_wiki_id` int(11) NOT NULL, - PRIMARY KEY (`id`), - KEY `wiki_redirects_wiki_id_title` (`wiki_id`,`title`), - KEY `index_wiki_redirects_on_wiki_id` (`wiki_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- 
Dumping data for table `wiki_redirects` --- - -LOCK TABLES `wiki_redirects` WRITE; -/*!40000 ALTER TABLE `wiki_redirects` DISABLE KEYS */; -/*!40000 ALTER TABLE `wiki_redirects` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `wikis` --- - -DROP TABLE IF EXISTS `wikis`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `wikis` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `project_id` int(11) NOT NULL, - `start_page` varchar(255) NOT NULL, - `status` int(11) NOT NULL DEFAULT '1', - PRIMARY KEY (`id`), - KEY `wikis_project_id` (`project_id`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = @saved_cs_client */; - --- --- Dumping data for table `wikis` --- - -LOCK TABLES `wikis` WRITE; -/*!40000 ALTER TABLE `wikis` DISABLE KEYS */; -/*!40000 ALTER TABLE `wikis` ENABLE KEYS */; -UNLOCK TABLES; - --- --- Table structure for table `workflows` --- - -DROP TABLE IF EXISTS `workflows`; -/*!40101 SET @saved_cs_client = @@character_set_client */; -/*!40101 SET character_set_client = utf8 */; -CREATE TABLE `workflows` ( - `id` int(11) NOT NULL AUTO_INCREMENT, - `tracker_id` int(11) NOT NULL DEFAULT '0', - `old_status_id` int(11) NOT NULL DEFAULT '0', - `new_status_id` int(11) NOT NULL DEFAULT '0', - `role_id` int(11) NOT NULL DEFAULT '0', - `assignee` tinyint(1) NOT NULL DEFAULT '0', - `author` tinyint(1) NOT NULL DEFAULT '0', - `type` varchar(30) DEFAULT NULL, - `field_name` varchar(30) DEFAULT NULL, - `rule` varchar(30) DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `wkfs_role_tracker_old_status` (`role_id`,`tracker_id`,`old_status_id`), - KEY `index_workflows_on_old_status_id` (`old_status_id`), - KEY `index_workflows_on_role_id` (`role_id`), - KEY `index_workflows_on_new_status_id` (`new_status_id`), - KEY `index_workflows_on_tracker_id` (`tracker_id`) -) ENGINE=InnoDB AUTO_INCREMENT=652 DEFAULT CHARSET=latin1; -/*!40101 SET character_set_client = 
@saved_cs_client */; - --- --- Dumping data for table `workflows` --- - -LOCK TABLES `workflows` WRITE; -/*!40000 ALTER TABLE `workflows` DISABLE KEYS */; -INSERT INTO `workflows` VALUES (132,1,2,0,3,0,0,'WorkflowPermission','14','readonly'),(134,1,2,0,3,0,0,'WorkflowPermission','16','readonly'),(151,1,3,0,3,0,0,'WorkflowPermission','14','readonly'),(153,1,3,0,3,0,0,'WorkflowPermission','16','readonly'),(170,1,4,0,3,0,0,'WorkflowPermission','14','readonly'),(172,1,4,0,3,0,0,'WorkflowPermission','16','readonly'),(189,1,5,0,3,0,0,'WorkflowPermission','14','readonly'),(191,1,5,0,3,0,0,'WorkflowPermission','16','readonly'),(208,1,6,0,3,0,0,'WorkflowPermission','14','readonly'),(210,1,6,0,3,0,0,'WorkflowPermission','16','readonly'),(220,1,2,3,3,0,0,'WorkflowTransition',NULL,NULL),(221,1,2,3,4,0,0,'WorkflowTransition',NULL,NULL),(222,1,2,3,5,0,0,'WorkflowTransition',NULL,NULL),(226,1,3,4,3,0,0,'WorkflowTransition',NULL,NULL),(227,1,3,4,4,0,0,'WorkflowTransition',NULL,NULL),(228,1,3,4,5,0,0,'WorkflowTransition',NULL,NULL),(229,1,4,5,3,0,0,'WorkflowTransition',NULL,NULL),(230,1,4,5,4,0,0,'WorkflowTransition',NULL,NULL),(231,1,4,5,5,0,0,'WorkflowTransition',NULL,NULL),(232,1,4,6,3,0,0,'WorkflowTransition',NULL,NULL),(233,1,4,6,4,0,0,'WorkflowTransition',NULL,NULL),(234,1,4,6,5,0,0,'WorkflowTransition',NULL,NULL),(239,1,2,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(240,1,3,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(241,1,4,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(242,1,5,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(243,1,6,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(244,1,0,2,5,0,0,'WorkflowTransition',NULL,NULL),(245,1,0,2,4,0,0,'WorkflowTransition',NULL,NULL),(246,1,0,6,5,0,0,'WorkflowTransition',NULL,NULL),(352,1,2,0,3,0,0,'WorkflowPermission','project_id','readonly'),(353,1,2,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(354,1,2,0,3,0,0,'WorkflowPermission','subject','readonly'),(355,1,2,0,3,0,0,'Work
flowPermission','priority_id','readonly'),(356,1,2,0,3,0,0,'WorkflowPermission','is_private','readonly'),(357,1,2,0,3,0,0,'WorkflowPermission','description','readonly'),(358,1,2,0,3,0,0,'WorkflowPermission','1','readonly'),(359,1,2,0,3,0,0,'WorkflowPermission','2','readonly'),(360,1,2,0,3,0,0,'WorkflowPermission','10','readonly'),(361,1,2,0,3,0,0,'WorkflowPermission','20','readonly'),(362,1,2,0,3,0,0,'WorkflowPermission','8','readonly'),(363,1,2,0,3,0,0,'WorkflowPermission','15','readonly'),(364,1,2,0,3,0,0,'WorkflowPermission','11','readonly'),(365,1,2,0,3,0,0,'WorkflowPermission','12','readonly'),(366,1,2,0,3,0,0,'WorkflowPermission','19','readonly'),(367,1,2,0,3,0,0,'WorkflowPermission','7','readonly'),(368,1,2,0,3,0,0,'WorkflowPermission','3','readonly'),(369,1,2,0,3,0,0,'WorkflowPermission','5','readonly'),(370,1,2,0,3,0,0,'WorkflowPermission','6','readonly'),(371,1,2,0,3,0,0,'WorkflowPermission','22','readonly'),(372,1,3,0,3,0,0,'WorkflowPermission','project_id','readonly'),(373,1,3,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(374,1,3,0,3,0,0,'WorkflowPermission','subject','readonly'),(375,1,3,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(376,1,3,0,3,0,0,'WorkflowPermission','is_private','readonly'),(377,1,3,0,3,0,0,'WorkflowPermission','description','readonly'),(378,1,3,0,3,0,0,'WorkflowPermission','1','readonly'),(379,1,3,0,3,0,0,'WorkflowPermission','2','readonly'),(380,1,3,0,3,0,0,'WorkflowPermission','10','readonly'),(381,1,3,0,3,0,0,'WorkflowPermission','20','readonly'),(382,1,3,0,3,0,0,'WorkflowPermission','8','readonly'),(383,1,3,0,3,0,0,'WorkflowPermission','15','readonly'),(384,1,3,0,3,0,0,'WorkflowPermission','11','readonly'),(385,1,3,0,3,0,0,'WorkflowPermission','12','readonly'),(386,1,3,0,3,0,0,'WorkflowPermission','19','readonly'),(387,1,3,0,3,0,0,'WorkflowPermission','7','readonly'),(388,1,3,0,3,0,0,'WorkflowPermission','3','readonly'),(389,1,3,0,3,0,0,'WorkflowPermission','5','readonly'),(390,1,3,0,3,0,0,'WorkflowPermission'
,'6','readonly'),(391,1,3,0,3,0,0,'WorkflowPermission','22','readonly'),(392,1,4,0,3,0,0,'WorkflowPermission','project_id','readonly'),(393,1,4,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(394,1,4,0,3,0,0,'WorkflowPermission','subject','readonly'),(395,1,4,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(396,1,4,0,3,0,0,'WorkflowPermission','is_private','readonly'),(397,1,4,0,3,0,0,'WorkflowPermission','description','readonly'),(398,1,4,0,3,0,0,'WorkflowPermission','1','readonly'),(399,1,4,0,3,0,0,'WorkflowPermission','2','readonly'),(400,1,4,0,3,0,0,'WorkflowPermission','10','readonly'),(401,1,4,0,3,0,0,'WorkflowPermission','20','readonly'),(402,1,4,0,3,0,0,'WorkflowPermission','8','readonly'),(403,1,4,0,3,0,0,'WorkflowPermission','15','readonly'),(404,1,4,0,3,0,0,'WorkflowPermission','11','readonly'),(405,1,4,0,3,0,0,'WorkflowPermission','12','readonly'),(406,1,4,0,3,0,0,'WorkflowPermission','19','readonly'),(407,1,4,0,3,0,0,'WorkflowPermission','7','readonly'),(408,1,4,0,3,0,0,'WorkflowPermission','3','readonly'),(409,1,4,0,3,0,0,'WorkflowPermission','5','readonly'),(410,1,4,0,3,0,0,'WorkflowPermission','6','readonly'),(411,1,4,0,3,0,0,'WorkflowPermission','22','readonly'),(412,1,5,0,3,0,0,'WorkflowPermission','project_id','readonly'),(413,1,5,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(414,1,5,0,3,0,0,'WorkflowPermission','subject','readonly'),(415,1,5,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(416,1,5,0,3,0,0,'WorkflowPermission','is_private','readonly'),(417,1,5,0,3,0,0,'WorkflowPermission','description','readonly'),(418,1,5,0,3,0,0,'WorkflowPermission','1','readonly'),(419,1,5,0,3,0,0,'WorkflowPermission','2','readonly'),(420,1,5,0,3,0,0,'WorkflowPermission','10','readonly'),(421,1,5,0,3,0,0,'WorkflowPermission','20','readonly'),(422,1,5,0,3,0,0,'WorkflowPermission','8','readonly'),(423,1,5,0,3,0,0,'WorkflowPermission','15','readonly'),(424,1,5,0,3,0,0,'WorkflowPermission','11','readonly'),(425,1,5,0,3,0,0,'WorkflowPer
mission','12','readonly'),(426,1,5,0,3,0,0,'WorkflowPermission','19','readonly'),(427,1,5,0,3,0,0,'WorkflowPermission','7','readonly'),(428,1,5,0,3,0,0,'WorkflowPermission','3','readonly'),(429,1,5,0,3,0,0,'WorkflowPermission','5','readonly'),(430,1,5,0,3,0,0,'WorkflowPermission','6','readonly'),(431,1,5,0,3,0,0,'WorkflowPermission','22','readonly'),(432,1,6,0,3,0,0,'WorkflowPermission','project_id','readonly'),(433,1,6,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(434,1,6,0,3,0,0,'WorkflowPermission','subject','readonly'),(435,1,6,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(436,1,6,0,3,0,0,'WorkflowPermission','is_private','readonly'),(437,1,6,0,3,0,0,'WorkflowPermission','description','readonly'),(438,1,6,0,3,0,0,'WorkflowPermission','1','readonly'),(439,1,6,0,3,0,0,'WorkflowPermission','2','readonly'),(440,1,6,0,3,0,0,'WorkflowPermission','10','readonly'),(441,1,6,0,3,0,0,'WorkflowPermission','20','readonly'),(442,1,6,0,3,0,0,'WorkflowPermission','8','readonly'),(443,1,6,0,3,0,0,'WorkflowPermission','15','readonly'),(444,1,6,0,3,0,0,'WorkflowPermission','11','readonly'),(445,1,6,0,3,0,0,'WorkflowPermission','12','readonly'),(446,1,6,0,3,0,0,'WorkflowPermission','19','readonly'),(447,1,6,0,3,0,0,'WorkflowPermission','7','readonly'),(448,1,6,0,3,0,0,'WorkflowPermission','3','readonly'),(449,1,6,0,3,0,0,'WorkflowPermission','5','readonly'),(450,1,6,0,3,0,0,'WorkflowPermission','6','readonly'),(451,1,6,0,3,0,0,'WorkflowPermission','22','readonly'),(537,1,2,0,2,0,0,'WorkflowPermission','project_id','readonly'),(538,1,2,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(539,1,2,0,2,0,0,'WorkflowPermission','subject','readonly'),(540,1,2,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(541,1,2,0,2,0,0,'WorkflowPermission','is_private','readonly'),(542,1,2,0,2,0,0,'WorkflowPermission','description','readonly'),(543,1,2,0,2,0,0,'WorkflowPermission','1','readonly'),(544,1,2,0,2,0,0,'WorkflowPermission','2','readonly'),(545,1,2,0,2,0,0,'Workf
lowPermission','10','readonly'),(546,1,2,0,2,0,0,'WorkflowPermission','20','readonly'),(547,1,2,0,2,0,0,'WorkflowPermission','8','readonly'),(548,1,2,0,2,0,0,'WorkflowPermission','15','readonly'),(549,1,2,0,2,0,0,'WorkflowPermission','11','readonly'),(550,1,2,0,2,0,0,'WorkflowPermission','12','readonly'),(551,1,2,0,2,0,0,'WorkflowPermission','19','readonly'),(552,1,2,0,2,0,0,'WorkflowPermission','17','readonly'),(553,1,2,0,2,0,0,'WorkflowPermission','7','readonly'),(554,1,2,0,2,0,0,'WorkflowPermission','3','readonly'),(555,1,2,0,2,0,0,'WorkflowPermission','5','readonly'),(556,1,2,0,2,0,0,'WorkflowPermission','6','readonly'),(557,1,2,0,2,0,0,'WorkflowPermission','22','readonly'),(558,1,3,0,2,0,0,'WorkflowPermission','project_id','readonly'),(559,1,3,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(560,1,3,0,2,0,0,'WorkflowPermission','subject','readonly'),(561,1,3,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(562,1,3,0,2,0,0,'WorkflowPermission','is_private','readonly'),(563,1,3,0,2,0,0,'WorkflowPermission','description','readonly'),(564,1,3,0,2,0,0,'WorkflowPermission','1','readonly'),(565,1,3,0,2,0,0,'WorkflowPermission','2','readonly'),(566,1,3,0,2,0,0,'WorkflowPermission','10','readonly'),(567,1,3,0,2,0,0,'WorkflowPermission','20','readonly'),(568,1,3,0,2,0,0,'WorkflowPermission','8','readonly'),(569,1,3,0,2,0,0,'WorkflowPermission','15','readonly'),(570,1,3,0,2,0,0,'WorkflowPermission','11','readonly'),(571,1,3,0,2,0,0,'WorkflowPermission','12','readonly'),(572,1,3,0,2,0,0,'WorkflowPermission','19','readonly'),(573,1,3,0,2,0,0,'WorkflowPermission','17','readonly'),(574,1,3,0,2,0,0,'WorkflowPermission','7','readonly'),(575,1,3,0,2,0,0,'WorkflowPermission','3','readonly'),(576,1,3,0,2,0,0,'WorkflowPermission','5','readonly'),(577,1,3,0,2,0,0,'WorkflowPermission','6','readonly'),(578,1,3,0,2,0,0,'WorkflowPermission','22','readonly'),(579,1,4,0,2,0,0,'WorkflowPermission','project_id','readonly'),(580,1,4,0,2,0,0,'WorkflowPermission','tracker_id','rea
donly'),(581,1,4,0,2,0,0,'WorkflowPermission','subject','readonly'),(582,1,4,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(583,1,4,0,2,0,0,'WorkflowPermission','is_private','readonly'),(584,1,4,0,2,0,0,'WorkflowPermission','description','readonly'),(585,1,4,0,2,0,0,'WorkflowPermission','1','readonly'),(586,1,4,0,2,0,0,'WorkflowPermission','2','readonly'),(587,1,4,0,2,0,0,'WorkflowPermission','10','readonly'),(588,1,4,0,2,0,0,'WorkflowPermission','20','readonly'),(589,1,4,0,2,0,0,'WorkflowPermission','8','readonly'),(590,1,4,0,2,0,0,'WorkflowPermission','15','readonly'),(591,1,4,0,2,0,0,'WorkflowPermission','11','readonly'),(592,1,4,0,2,0,0,'WorkflowPermission','12','readonly'),(593,1,4,0,2,0,0,'WorkflowPermission','19','readonly'),(594,1,4,0,2,0,0,'WorkflowPermission','17','readonly'),(595,1,4,0,2,0,0,'WorkflowPermission','7','readonly'),(596,1,4,0,2,0,0,'WorkflowPermission','3','readonly'),(597,1,4,0,2,0,0,'WorkflowPermission','5','readonly'),(598,1,4,0,2,0,0,'WorkflowPermission','6','readonly'),(599,1,4,0,2,0,0,'WorkflowPermission','22','readonly'),(600,1,5,0,2,0,0,'WorkflowPermission','project_id','readonly'),(601,1,5,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(602,1,5,0,2,0,0,'WorkflowPermission','subject','readonly'),(603,1,5,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(604,1,5,0,2,0,0,'WorkflowPermission','is_private','readonly'),(605,1,5,0,2,0,0,'WorkflowPermission','description','readonly'),(606,1,5,0,2,0,0,'WorkflowPermission','1','readonly'),(607,1,5,0,2,0,0,'WorkflowPermission','2','readonly'),(608,1,5,0,2,0,0,'WorkflowPermission','10','readonly'),(609,1,5,0,2,0,0,'WorkflowPermission','20','readonly'),(610,1,5,0,2,0,0,'WorkflowPermission','8','readonly'),(611,1,5,0,2,0,0,'WorkflowPermission','15','readonly'),(612,1,5,0,2,0,0,'WorkflowPermission','11','readonly'),(613,1,5,0,2,0,0,'WorkflowPermission','12','readonly'),(614,1,5,0,2,0,0,'WorkflowPermission','19','readonly'),(615,1,5,0,2,0,0,'WorkflowPermission','17','readonly')
,(616,1,5,0,2,0,0,'WorkflowPermission','7','readonly'),(617,1,5,0,2,0,0,'WorkflowPermission','3','readonly'),(618,1,5,0,2,0,0,'WorkflowPermission','5','readonly'),(619,1,5,0,2,0,0,'WorkflowPermission','6','readonly'),(620,1,5,0,2,0,0,'WorkflowPermission','22','readonly'),(621,1,6,0,2,0,0,'WorkflowPermission','project_id','readonly'),(622,1,6,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(623,1,6,0,2,0,0,'WorkflowPermission','subject','readonly'),(624,1,6,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(625,1,6,0,2,0,0,'WorkflowPermission','is_private','readonly'),(626,1,6,0,2,0,0,'WorkflowPermission','description','readonly'),(627,1,6,0,2,0,0,'WorkflowPermission','1','readonly'),(628,1,6,0,2,0,0,'WorkflowPermission','2','readonly'),(629,1,6,0,2,0,0,'WorkflowPermission','10','readonly'),(630,1,6,0,2,0,0,'WorkflowPermission','20','readonly'),(631,1,6,0,2,0,0,'WorkflowPermission','8','readonly'),(632,1,6,0,2,0,0,'WorkflowPermission','15','readonly'),(633,1,6,0,2,0,0,'WorkflowPermission','11','readonly'),(634,1,6,0,2,0,0,'WorkflowPermission','12','readonly'),(635,1,6,0,2,0,0,'WorkflowPermission','19','readonly'),(636,1,6,0,2,0,0,'WorkflowPermission','17','readonly'),(637,1,6,0,2,0,0,'WorkflowPermission','7','readonly'),(638,1,6,0,2,0,0,'WorkflowPermission','3','readonly'),(639,1,6,0,2,0,0,'WorkflowPermission','5','readonly'),(640,1,6,0,2,0,0,'WorkflowPermission','6','readonly'),(641,1,6,0,2,0,0,'WorkflowPermission','22','readonly'),(642,1,2,3,2,0,0,'WorkflowTransition',NULL,NULL),(644,1,3,4,2,0,0,'WorkflowTransition',NULL,NULL),(645,1,4,5,2,0,0,'WorkflowTransition',NULL,NULL),(646,1,4,6,2,0,0,'WorkflowTransition',NULL,NULL),(648,1,4,3,2,0,0,'WorkflowTransition',NULL,NULL),(649,1,4,3,3,0,0,'WorkflowTransition',NULL,NULL),(650,1,4,3,4,0,0,'WorkflowTransition',NULL,NULL),(651,1,4,3,5,0,0,'WorkflowTransition',NULL,NULL); -/*!40000 ALTER TABLE `workflows` ENABLE KEYS */; -UNLOCK TABLES; -/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; - -/*!40101 SET 
SQL_MODE=@OLD_SQL_MODE */; -/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; -/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; -/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; -/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; -/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; -/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; - --- Dump completed on 2020-08-17 18:06:56 diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls deleted file mode 100644 index f8395f7b2..000000000 --- a/salt/playbook/init.sls +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'playbook/map.jinja' import PLAYBOOKMERGED %} - -include: -{% if PLAYBOOKMERGED.enabled %} - - playbook.enabled -{% else %} - - playbook.disabled -{% endif %} diff --git a/salt/playbook/map.jinja b/salt/playbook/map.jinja deleted file mode 100644 index 0ee058c68..000000000 --- a/salt/playbook/map.jinja +++ /dev/null @@ -1,2 +0,0 @@ -{% import_yaml 'playbook/defaults.yaml' as PLAYBOOKDEFAULTS %} -{% set PLAYBOOKMERGED = salt['pillar.get']('playbook', PLAYBOOKDEFAULTS.playbook, merge=True) %} diff --git a/salt/playbook/soc_playbook.yaml b/salt/playbook/soc_playbook.yaml deleted file mode 100644 index e07ae8653..000000000 --- a/salt/playbook/soc_playbook.yaml +++ /dev/null @@ -1,4 +0,0 @@ -playbook: - enabled: - description: You can enable or disable Playbook. 
- helpLink: playbook.html diff --git a/salt/playbook/sostatus.sls b/salt/playbook/sostatus.sls deleted file mode 100644 index f635746d3..000000000 --- a/salt/playbook/sostatus.sls +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-playbook_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-playbook - - unless: grep -q so-playbook /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/playbook/tools/sbin/so-playbook-import b/salt/playbook/tools/sbin/so-playbook-import deleted file mode 100755 index d775656a1..000000000 --- a/salt/playbook/tools/sbin/so-playbook-import +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -ENABLEPLAY=${1:-False} - -docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))" diff --git a/salt/playbook/tools/sbin/so-playbook-reset b/salt/playbook/tools/sbin/so-playbook-reset deleted file mode 100755 index 106d9e852..000000000 --- a/salt/playbook/tools/sbin/so-playbook-reset +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -salt-call state.apply playbook.db_init,playbook queue=True - -/usr/sbin/so-soctopus-restart - -salt-call state.apply playbook,playbook.automation_user_create queue=True - -/usr/sbin/so-soctopus-restart - -echo "Importing Plays - NOTE: this will continue after installation finishes and could take an hour or more. Rebooting while the import is in progress will delay playbook imports." -sleep 5 -so-playbook-ruleupdate >> /root/setup_playbook_rule_update.log 2>&1 & diff --git a/salt/playbook/tools/sbin/so-playbook-restart b/salt/playbook/tools/sbin/so-playbook-restart deleted file mode 100755 index c59e7f7eb..000000000 --- a/salt/playbook/tools/sbin/so-playbook-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -/usr/sbin/so-restart playbook $1 diff --git a/salt/playbook/tools/sbin/so-playbook-ruleupdate b/salt/playbook/tools/sbin/so-playbook-ruleupdate deleted file mode 100755 index cbfe72bce..000000000 --- a/salt/playbook/tools/sbin/so-playbook-ruleupdate +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -docker exec so-soctopus python3 playbook_bulk-update.py diff --git a/salt/playbook/tools/sbin/so-playbook-sigma-refresh b/salt/playbook/tools/sbin/so-playbook-sigma-refresh deleted file mode 100755 index fefd4ca68..000000000 --- a/salt/playbook/tools/sbin/so-playbook-sigma-refresh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -if ! [ -f /opt/so/state/playbook_regen_plays ] || [ "$1" = "--force" ]; then - - echo "Refreshing Sigma & regenerating plays... 
" - - # Regenerate ElastAlert & update Plays - docker exec so-soctopus python3 playbook_play-update.py - - # Delete current Elastalert Rules - rm /opt/so/rules/elastalert/playbook/*.yaml - - # Regenerate Elastalert Rules - so-playbook-sync - - # Create state file - touch /opt/so/state/playbook_regen_plays -else - printf "\nState file found, exiting...\nRerun with --force to override.\n" -fi \ No newline at end of file diff --git a/salt/playbook/tools/sbin/so-playbook-start b/salt/playbook/tools/sbin/so-playbook-start deleted file mode 100755 index 070bcc4f7..000000000 --- a/salt/playbook/tools/sbin/so-playbook-start +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-start playbook $1 diff --git a/salt/playbook/tools/sbin/so-playbook-stop b/salt/playbook/tools/sbin/so-playbook-stop deleted file mode 100755 index 64ce83b2b..000000000 --- a/salt/playbook/tools/sbin/so-playbook-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -/usr/sbin/so-stop playbook $1 diff --git a/salt/playbook/tools/sbin/so-playbook-sync b/salt/playbook/tools/sbin/so-playbook-sync deleted file mode 100755 index 7f6ba4e31..000000000 --- a/salt/playbook/tools/sbin/so-playbook-sync +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -# Check to see if we are already running -NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-playbook-sync") -[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING Playbook sync processes running...exiting." && exit 0 - -docker exec so-soctopus python3 playbook_play-sync.py diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8bb180567..e3fcd4206 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1291,11 +1291,6 @@ soc: icon: fa-external-link-alt target: so-cyberchef link: /cyberchef/ - - name: toolPlaybook - description: toolPlaybookHelp - icon: fa-external-link-alt - target: so-playbook - link: /playbook/projects/detection-playbooks/issues/ - name: toolNavigator description: toolNavigatorHelp icon: fa-external-link-alt diff --git a/salt/soctopus/config.sls b/salt/soctopus/config.sls deleted file mode 100644 index 35b55d296..000000000 --- a/salt/soctopus/config.sls +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} - -include: - - nginx.config - -soctopusdir: - file.directory: - - name: /opt/so/conf/soctopus/sigma-import - - user: 939 - - group: 939 - - makedirs: True - -soctopus-sync: - file.recurse: - - name: /opt/so/conf/soctopus/templates - - source: salt://soctopus/files/templates - - user: 939 - - group: 939 - - template: jinja - - defaults: - GLOBALS: {{ GLOBALS }} - -soctopusconf: - file.managed: - - name: /opt/so/conf/soctopus/SOCtopus.conf - - source: salt://soctopus/files/SOCtopus.conf - - user: 939 - - group: 939 - - mode: 600 - - template: jinja - - show_changes: False - - defaults: - GLOBALS: {{ GLOBALS }} - -soctopuslogdir: - file.directory: - - name: /opt/so/log/soctopus - - user: 939 - - group: 939 - -playbookrulesdir: - file.directory: - - name: /opt/so/rules/elastalert/playbook - - user: 939 - - group: 939 - - makedirs: True - -playbookrulessync: - file.recurse: - - name: /opt/so/rules/elastalert/playbook - - source: salt://soctopus/files/templates - - user: 939 - - group: 939 - - template: jinja - - defaults: - GLOBALS: {{ GLOBALS }} - -soctopus_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://soctopus/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -#soctopus_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://soctopus/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/defaults.yaml b/salt/soctopus/defaults.yaml deleted file mode 100644 index cb7f286ae..000000000 --- a/salt/soctopus/defaults.yaml +++ /dev/null @@ -1,2 +0,0 @@ -soctopus: - enabled: False diff --git a/salt/soctopus/disabled.sls b/salt/soctopus/disabled.sls deleted file mode 100644 index 
9293a9d71..000000000 --- a/salt/soctopus/disabled.sls +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - soctopus.sostatus - -so-soctopus: - docker_container.absent: - - force: True - -so-soctopus_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-soctopus$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/enabled.sls b/salt/soctopus/enabled.sls deleted file mode 100644 index 567562fbb..000000000 --- a/salt/soctopus/enabled.sls +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'docker/docker.map.jinja' import DOCKER %} - -include: - - soctopus.config - - soctopus.sostatus - -so-soctopus: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }} - - hostname: soctopus - - name: so-soctopus - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-soctopus'].ip }} - - binds: - - /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro - - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw - - /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw - - /opt/so/conf/navigator/layers/:/etc/playbook/:rw - - /opt/so/conf/soctopus/sigma-import/:/SOCtopus/sigma-import/:rw - {% if GLOBALS.airgap %} - - /nsm/repo/rules/sigma:/soctopus/sigma - {% endif %} - {% if DOCKER.containers['so-soctopus'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-soctopus'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - port_bindings: - {% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %} - - {{ BINDING }} - {% endfor %} - - extra_hosts: - - {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}} - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-soctopus'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-soctopus'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - {% if DOCKER.containers['so-soctopus'].extra_env %} - - environment: - {% for XTRAENV in DOCKER.containers['so-soctopus'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - watch: - - file: /opt/so/conf/soctopus/SOCtopus.conf - - require: - - file: soctopusconf - - file: navigatordefaultlayer - -delete_so-soctopus_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-soctopus$ - -{% else %} - 
-{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf deleted file mode 100644 index d9dd03f16..000000000 --- a/salt/soctopus/files/SOCtopus.conf +++ /dev/null @@ -1,77 +0,0 @@ -{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %} -{%- set THEHIVEURL = salt['pillar.get']('global:hiveurl', '') %} -{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} -{%- set PLAYBOOK_KEY = salt['pillar.get']('secrets:playbook_automation_api_key', '') %} -{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} -{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} - -[es] -es_url = https://{{ GLOBALS.manager_ip }}:9200 -es_ip = {{ GLOBALS.manager_ip }} -es_user = {{ ES_USER }} -es_pass = "{{ ES_PASS }}" -es_index_pattern = so-* -es_verifycert = no - -[cortex] -auto_analyze_alerts = no -cortex_url = https://{{THEHIVEURL}}/cortex/ -cortex_key = {{ CORTEXKEY }} -supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS - -[fir] -fir_url = YOURFIRURL -fir_token = YOURFIRTOKEN -fir_actor = 3 -fir_category = 3 -fir_confidentiality = 1 -fir_detection = 2 -fir_plan = 8 -fir_severity = 4 -fir_verifycert = no - -[grr] -grr_url = YOURGRRURL -grr_user = YOURGRRUSER -grr_pass = YOURGRRPASS - -[hive] -hive_url = https://{{THEHIVEURL}}/thehive/ -hive_key = {{ HIVEKEY }} -hive_tlp = 3 -hive_verifycert = no - -[misp] -misp_url = YOURMISPURL -misp_key = YOURMISPKEY -misp_verifycert = no -distrib = 0 -threat = 4 -analysis = 0 - -[rtir] -rtir_url = YOURRTIRURL -rtir_api = REST/1.0/ -rtir_user = YOURRTIRUSER -rtir_pass = YOURRTIRPASS -rtir_queue = Incidents -rtir_creator = root -rtir_verifycert = no - -[slack] -slack_url = YOURSLACKWORKSPACE -slack_webhook = YOURSLACKWEBHOOK - -[soc] -soc_url = http://{{ GLOBALS.manager }}:9822 - -[playbook] -playbook_url = http://{{ 
GLOBALS.manager }}:3000/playbook -playbook_ext_url = https://{{ GLOBALS.url_base }}/playbook -playbook_key = {{ PLAYBOOK_KEY }} -playbook_verifycert = no -playbook_unit_test_index = playbook-testing -playbook_rulesets = {{ salt['pillar.get']('soctopus:playbook:rulesets')|join(",") }} - -[log] -logfile = /var/log/SOCtopus/soctopus.log diff --git a/salt/soctopus/files/templates/es-generic.template b/salt/soctopus/files/templates/es-generic.template deleted file mode 100644 index af9859047..000000000 --- a/salt/soctopus/files/templates/es-generic.template +++ /dev/null @@ -1,5 +0,0 @@ -alert: modules.so.playbook-es.PlaybookESAlerter -elasticsearch_host: "{{ GLOBALS.manager_ip }}:9200" -play_title: "" -play_url: "https://{{ GLOBALS.manager_ip }}/playbook/issues/6000" -sigma_level: "" diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template deleted file mode 100644 index 505d4ec41..000000000 --- a/salt/soctopus/files/templates/generic.template +++ /dev/null @@ -1,22 +0,0 @@ -alert: -- "modules.so.playbook-es.PlaybookESAlerter" - -elasticsearch_host: "{{ GLOBALS.url_base }}:9200" -play_title: "" -play_id: "" -event.module: "playbook" -event.dataset: "playbook.alert" -event.severity: -rule.category: -play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000" -kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))" -soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt" -sigma_level: "" - -index: '.ds-logs-*' -name: EQL -priority: 3 -realert: - minutes: 0 -type: any -filter: diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template deleted file mode 100644 index eb1857bb6..000000000 --- a/salt/soctopus/files/templates/osquery.template +++ /dev/null @@ -1,13 +0,0 @@ -alert: -- "modules.so.playbook-es.PlaybookESAlerter" - -elasticsearch_host: 
"{{ GLOBALS.url_base }}:9200" -play_title: "" -event.module: "playbook" -event.dataset: "alert" -event.severity: -rule.category: -play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000" -kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))" -soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt" -sigma_level: "" diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls deleted file mode 100644 index c9359a68c..000000000 --- a/salt/soctopus/init.sls +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'soctopus/map.jinja' import SOCTOPUSMERGED %} - -include: -{% if SOCTOPUSMERGED.enabled %} - - soctopus.enabled -{% else %} - - soctopus.disabled -{% endif %} diff --git a/salt/soctopus/map.jinja b/salt/soctopus/map.jinja deleted file mode 100644 index 07df21dbb..000000000 --- a/salt/soctopus/map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at - https://securityonion.net/license; you may not use this file except in compliance with the - Elastic License 2.0. 
#} - -{% import_yaml 'soctopus/defaults.yaml' as SOCTOPUSDEFAULTS %} -{% set SOCTOPUSMERGED = salt['pillar.get']('soctopus', SOCTOPUSDEFAULTS.soctopus, merge=True) %} diff --git a/salt/soctopus/soc_soctopus.yaml b/salt/soctopus/soc_soctopus.yaml deleted file mode 100644 index 4c235ebb7..000000000 --- a/salt/soctopus/soc_soctopus.yaml +++ /dev/null @@ -1,10 +0,0 @@ -soctopus: - enabled: - description: You can enable or disable SOCtopus. - helpLink: playbook.html - playbook: - rulesets: - description: List of playbook rulesets. - advanced: True - helpLink: playbook.html - global: True diff --git a/salt/soctopus/sostatus.sls b/salt/soctopus/sostatus.sls deleted file mode 100644 index 8a888235e..000000000 --- a/salt/soctopus/sostatus.sls +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-soctopus_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-soctopus - - unless: grep -q so-soctopus /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/soctopus/tools/sbin/so-soctopus-restart b/salt/soctopus/tools/sbin/so-soctopus-restart deleted file mode 100755 index 24b3aff85..000000000 --- a/salt/soctopus/tools/sbin/so-soctopus-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-restart soctopus $1 diff --git a/salt/soctopus/tools/sbin/so-soctopus-start b/salt/soctopus/tools/sbin/so-soctopus-start deleted file mode 100755 index 990ece70e..000000000 --- a/salt/soctopus/tools/sbin/so-soctopus-start +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-start soctopus $1 diff --git a/salt/soctopus/tools/sbin/so-soctopus-stop b/salt/soctopus/tools/sbin/so-soctopus-stop deleted file mode 100755 index 39efa6435..000000000 --- a/salt/soctopus/tools/sbin/so-soctopus-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -/usr/sbin/so-stop soctopus $1 diff --git a/salt/top.sls b/salt/top.sls index 16b355476..d4852aa4d 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -67,7 +67,6 @@ base: - idstools - suricata.manager - healthcheck - - mysql - elasticsearch - elastic-fleet-package-registry - kibana @@ -78,8 +77,6 @@ base: - curator.disabled - elastalert - utility - - soctopus - - playbook - elasticfleet '*_manager and G@saltversion:{{saltversion}}': @@ -99,7 +96,6 @@ base: - backup.config_backup - idstools - suricata.manager - - mysql - elasticsearch - logstash - redis @@ -108,8 +104,6 @@ base: - curator.disabled - elastalert - utility - - soctopus - - playbook - elasticfleet - stig @@ -132,7 +126,6 @@ base: - idstools - suricata.manager - healthcheck - - mysql - elasticsearch - logstash - redis @@ -145,8 +138,6 @@ base: - curator.disabled - elastalert - utility - - soctopus - - playbook - elasticfleet - stig @@ -179,7 +170,6 @@ base: - backup.config_backup - idstools - suricata.manager - - mysql - elasticsearch - logstash - redis @@ -188,8 +178,6 @@ base: - kibana - elastalert - utility - - soctopus - - playbook - elasticfleet - stig diff --git a/setup/so-functions b/setup/so-functions index 4aae0f5bd..0d66a2621 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -792,7 +792,6 @@ create_manager_pillars() { create_global create_sensoroni_pillar backup_pillar - soctopus_pillar docker_pillar redis_pillar idstools_pillar @@ -1109,10 +1108,6 @@ generate_ssl() { generate_passwords(){ title "Generate Random Passwords" - MYSQLPASS=$(get_random_value) - PLAYBOOKDBPASS=$(get_random_value) - PLAYBOOKADMINPASS=$(get_random_value) - PLAYBOOKAUTOMATIONPASS=$(get_random_value) INFLUXPASS=$(get_random_value) INFLUXTOKEN=$(head -c 64 /dev/urandom | base64 --wrap=0) SENSORONIKEY=$(get_random_value) @@ -1167,11 +1162,6 @@ install_cleanup() { # that will disrupt automated tests should be placed beneath this statement. 
[ -n "$TESTING" ] && return - # If Mysql is running stop it - if docker ps --format "{{.Names}}" 2>&1 | grep -q "so-mysql"; then - logVmd "/usr/sbin/so-mysql-stop" - fi - if [[ $setup_type == 'iso' ]]; then info "Removing so-setup permission entry from sudoers file" logCmd "sed -i '/so-setup/d' /etc/sudoers" @@ -1279,17 +1269,11 @@ telegraf_pillar() { manager_pillar() { touch $adv_manager_pillar_file title "Create the manager pillar" - if [[ $is_import ]]; then - PLAYBOOK=0 - else - PLAYBOOK=1 - fi printf '%s\n'\ "manager:"\ " proxy: '$so_proxy'"\ " no_proxy: '$no_proxy_string'"\ " elastalert: 1"\ - " playbook: $PLAYBOOK"\ "" > "$manager_pillar_file" } @@ -1362,16 +1346,6 @@ backup_pillar() { touch $adv_backup_pillar_file } -soctopus_pillar() { - title "Create the soctopus pillar file" - touch $adv_soctopus_pillar_file - printf '%s\n'\ - "soctopus:"\ - " playbook:"\ - " rulesets:"\ - " - windows" > "$soctopus_pillar_file" -} - docker_pillar() { title "Create the docker pillar file" touch $adv_docker_pillar_file @@ -1413,7 +1387,7 @@ make_some_dirs() { mkdir -p $local_salt_dir/salt/firewall/portgroups mkdir -p $local_salt_dir/salt/firewall/ports - for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc soctopus docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do + for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do mkdir -p $local_salt_dir/pillar/$THEDIR touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls @@ -1949,7 +1923,6 @@ saltify() { salt_install_module_deps() { logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/" - logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/" 
} salt_patch_x509_v2() { @@ -1967,11 +1940,6 @@ secrets_pillar(){ mkdir -p $local_salt_dir/pillar printf '%s\n'\ "secrets:"\ - " mysql: $MYSQLPASS"\ - " playbook_db: $PLAYBOOKDBPASS"\ - " playbook_admin: $PLAYBOOKADMINPASS"\ - " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\ - " playbook_automation_api_key: "\ " import_pass: $IMPORTPASS"\ " influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls fi diff --git a/setup/so-setup b/setup/so-setup index e2de39f50..2f62dca78 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -775,10 +775,6 @@ if ! [[ -f $install_opt_file ]]; then error "Failed to run so-elastic-fleet-setup" fail_setup fi - if [[ ! $is_import ]]; then - title "Setting up Playbook" - logCmd "so-playbook-reset" - fi checkin_at_boot set_initial_firewall_access logCmd "salt-call schedule.enable -linfo --local" diff --git a/setup/so-variables b/setup/so-variables index 511dfc43b..42ed8fc5c 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -112,12 +112,6 @@ export sensoroni_pillar_file adv_sensoroni_pillar_file="$local_salt_dir/pillar/sensoroni/adv_sensoroni.sls" export adv_sensoroni_pillar_file -soctopus_pillar_file="$local_salt_dir/pillar/soctopus/soc_soctopus.sls" -export soctopus_pillar_file - -adv_soctopus_pillar_file="$local_salt_dir/pillar/soctopus/adv_soctopus.sls" -export adv_soctopus_pillar_file - docker_pillar_file="$local_salt_dir/pillar/docker/soc_docker.sls" export docker_pillar From 94ee761207b92a17f6012d1aca01694bc6e2eac6 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 25 Mar 2024 21:11:47 -0400 Subject: [PATCH 259/777] Remove Playbook ref --- salt/soc/merged.map.jinja | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index c22ed2210..b2362a20e 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -41,10 +41,6 @@ {% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %} {% endif %} -{% if 
pillar.manager.playbook == 0 %} -{% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %} -{% endif %} - {% set standard_actions = SOCMERGED.config.pop('actions') %} {% if pillar.global.endgamehost != '' %} From 20bd9a9701f3226f22be500692e6e4714e7755f4 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 26 Mar 2024 07:39:24 -0400 Subject: [PATCH 260/777] FEATURE: Include additional groupby fields in Dashboards relating to sankey diagrams #12657 --- salt/soc/defaults.yaml | 128 ++++++++++++++++++++++------------------- 1 file changed, 69 insertions(+), 59 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8bb180567..db3fcdaf8 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -409,7 +409,7 @@ soc: - source.port - destination.ip - destination.port - - smtp.from + - smtp.mail_from - smtp.recipient_to - smtp.subject - smtp.useragent @@ -1166,6 +1166,16 @@ soc: - system.auth.sudo.command - event.dataset - message + ':opencanary:': + - soc_timestamp + - source.ip + - source.port + - logdata.HOSTNAME + - destination.port + - logdata.PATH + - logdata.USERNAME + - logdata.USERAGENT + - event.dataset server: bindAddress: 0.0.0.0:9822 baseUrl: / @@ -1601,55 +1611,55 @@ soc: queries: - name: Overview description: Overview of all events - query: '* | groupby -sankey event.dataset event.category* | groupby -pie event.category | groupby -bar event.module* | groupby event.dataset | groupby event.module* | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port' - name: SOC Auth description: SOC (Security Onion Console) 
authentication logs - query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby -sankey http_request.headers.x-real-ip identity_id | groupby http_request.headers.x-real-ip | groupby identity_id | groupby http_request.headers.user-agent' + query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip identity_id | groupby identity_id | groupby http_request.headers.user-agent' - name: Elastalerts description: Elastalert logs query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type' - name: Alerts description: Overview of all alerts - query: 'tags:alert | groupby event.module* | groupby rule.name | groupby event.severity | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination_geo.organization_name' - name: NIDS Alerts description: NIDS (Network Intrusion Detection System) alerts - query: 'event.category:network AND tags:alert | groupby rule.category | groupby -sankey source.ip destination.ip | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'event.category:network AND tags:alert | groupby rule.category | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby destination_geo.organization_name' - name: Sysmon Overview description: Overview of all Sysmon data types - query: 'event.dataset:windows.sysmon_operational | groupby -sankey event.action host.name | groupby -sankey host.name user.name | groupby host.name | 
groupby event.category event.action | groupby user.name | groupby dns.question.name | groupby process.executable | groupby winlog.event_data.TargetObject | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'event.dataset:windows.sysmon_operational | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.category event.action | groupby dns.question.name | groupby process.executable | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Host Overview description: Overview of all host data types - query: '((event.category:registry OR event.category:host OR event.category:process OR event.category:driver OR event.category:configuration) OR (event.category:file AND _exists_:process.executable) OR (event.category:network AND _exists_:host.name)) | groupby event.dataset* event.category* event.action* | groupby event.type | groupby host.name | groupby user.name | groupby file.name | groupby process.executable' + query: '((event.category:registry OR event.category:host OR event.category:process OR event.category:driver OR event.category:configuration) OR (event.category:file AND _exists_:process.executable) OR (event.category:network AND _exists_:host.name)) | groupby event.dataset* event.category* event.action* | groupby event.type | groupby -sankey event.type host.name | groupby host.name | groupby user.name | groupby file.name | groupby process.executable' - name: Host Registry Changes description: Windows Registry changes - query: 'event.category: registry | groupby -sankey event.action host.name | groupby event.dataset event.action | groupby host.name | groupby process.executable | groupby registry.path | groupby process.executable registry.path' + query: 'event.category: registry | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby 
event.dataset event.action | groupby process.executable | groupby registry.path | groupby process.executable registry.path' - name: Host DNS & Process Mappings description: DNS queries mapped to originating processes - query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' + query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby host.name | groupby -sankey host.name dns.question.name | groupby dns.question.name | groupby event.dataset event.type | groupby process.executable | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint - query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' + query: 'event.category:process | groupby host.name | groupby -sankey host.name user.name* | groupby user.name | groupby event.dataset event.action | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' - name: Host File Activity description: File 
activity captured on an endpoint - query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable' + query: 'event.category: file AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.executable | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name' - name: Host Network & Process Mappings description: Network activity mapped to originating processes - query: 'event.category: network AND _exists_:process.executable | groupby -sankey event.action host.name | groupby -sankey host.name user.name | groupby event.dataset* event.type* event.action* | groupby host.name | groupby user.name | groupby dns.question.name | groupby process.executable | groupby winlog.event_data.TargetObject | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'event.category: network AND _exists_:process.executable | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.dataset* event.type* event.action* | groupby dns.question.name | groupby process.executable | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Host API Events description: API (Application Programming Interface) events from endpoints - query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.name | groupby process.Ext.api.name' + query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby process.Ext.api.name' - name: Host Library Events description: Library events from endpoints - 
query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' + query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' - name: Host Security Events description: Security events from endpoints - query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby process.executable | groupby event.action | groupby event.outcome' + query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.executable | groupby process.executable | groupby event.action | groupby event.outcome' - name: Strelka description: Strelka file analysis - query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.name' + query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby -sankey file.source file.name | groupby file.name' - name: Zeek Notice description: Zeek notice logs - query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'event.dataset:zeek.notice | groupby notice.note | groupby -sankey notice.note source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | 
groupby destination.port | groupby notice.message | groupby notice.sub_message | groupby source_geo.organization_name | groupby destination_geo.organization_name' - name: Connections and Metadata with community_id description: Network connections that include community_id query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' @@ -1658,145 +1668,145 @@ soc: query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui' - name: DCE_RPC description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata - query: 'tags:dce_rpc | groupby -sankey dce_rpc.endpoint dce_rpc.operation | groupby dce_rpc.endpoint | groupby dce_rpc.operation | groupby dce_rpc.named_pipe | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dce_rpc | groupby dce_rpc.endpoint | groupby -sankey dce_rpc.endpoint dce_rpc.operation | groupby dce_rpc.operation | groupby -sankey dce_rpc.operation dce_rpc.named_pipe | groupby dce_rpc.named_pipe | groupby 
source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: DHCP description: DHCP (Dynamic Host Configuration Protocol) leases - query: 'tags:dhcp | groupby host.hostname | groupby dhcp.message_types | groupby -sankey client.address server.address | groupby client.address | groupby server.address | groupby host.domain' + query: 'tags:dhcp | groupby host.hostname | groupby -sankey host.hostname client.address | groupby client.address | groupby -sankey client.address server.address | groupby server.address | groupby dhcp.message_types | groupby host.domain' - name: DNS description: DNS (Domain Name System) queries - query: 'tags:dns | groupby dns.query.name | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby -sankey source.ip destination.ip | groupby dns.answers.name | groupby dns.query.type_name | groupby dns.response.code_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.response.code_name | groupby dns.answers.name | groupby dns.query.type_name | groupby dns.response.code_name | groupby destination_geo.organization_name' - name: DPD description: DPD (Dynamic Protocol Detection) errors - query: 'tags:dpd | groupby error.reason | groupby network.protocol | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | 
groupby destination_geo.organization_name' - name: Files description: Files seen in network traffic query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination_geo.organization_name' - name: FTP description: FTP (File Transfer Protocol) network metadata - query: 'tags:ftp | groupby -sankey ftp.command destination.ip | groupby ftp.command | groupby ftp.argument | groupby ftp.user | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:ftp | groupby ftp.command | groupby -sankey ftp.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ftp.argument | groupby ftp.user' - name: HTTP description: HTTP (Hyper Text Transport Protocol) network metadata query: 'tags:http | groupby http.method | groupby -sankey http.method http.virtual_host | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby file.resp_mime_types | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: Intel description: Zeek Intel framework hits - query: 'tags:intel | groupby intel.indicator | groupby -sankey source.ip intel.indicator | groupby intel.indicator_type | groupby intel.seen_where | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:intel | groupby intel.indicator | groupby -sankey intel.indicator source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby intel.indicator_type | groupby intel.seen_where' - name: IRC description: IRC (Internet Relay Chat) network metadata query: 'tags:irc | 
groupby irc.command.type | groupby -sankey irc.command.type irc.username | groupby irc.username | groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: Kerberos description: Kerberos network metadata - query: 'tags:kerberos | groupby kerberos.service | groupby -sankey kerberos.service destination.ip | groupby kerberos.client | groupby kerberos.request_type | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:kerberos | groupby kerberos.service | groupby -sankey kerberos.service source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby kerberos.client | groupby kerberos.request_type' - name: MySQL description: MySQL network metadata - query: 'tags:mysql | groupby mysql.command | groupby -sankey mysql.command destination.ip | groupby mysql.argument | groupby mysql.success | groupby mysql.response | groupby mysql.rows | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:mysql | groupby mysql.command | groupby -sankey mysql.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby mysql.argument | groupby mysql.success | groupby mysql.response | groupby mysql.rows' - name: NTLM description: NTLM (New Technology LAN Manager) network metadata - query: 'tags:ntlm | groupby ntlm.server.dns.name | groupby ntlm.server.nb.name | groupby -sankey source.ip destination.ip | groupby ntlm.server.tree.name | groupby ntlm.success | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:ntlm | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ntlm.server.dns.name | groupby ntlm.server.nb.name | groupby 
ntlm.server.tree.name | groupby ntlm.success | groupby source.ip | groupby destination.ip' - name: PE description: PE (Portable Executable) files transferred via network traffic - query: 'tags:pe | groupby file.machine | groupby -sankey file.machine file.os | groupby file.os | groupby file.subsystem | groupby file.section_names | groupby file.is_exe | groupby file.is_64bit' + query: 'tags:pe | groupby file.machine | groupby -sankey file.machine file.os | groupby file.os | groupby -sankey file.os file.subsystem | groupby file.subsystem | groupby file.section_names | groupby file.is_exe | groupby file.is_64bit' - name: RADIUS description: RADIUS (Remote Authentication Dial-In User Service) network metadata - query: 'tags:radius | groupby -sankey user.name destination.ip | groupby user.name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:radius | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: RDP description: RDP (Remote Desktop Protocol) network metadata - query: 'tags:rdp | groupby client.name | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:rdp | groupby client.name | groupby -sankey client.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: RFB description: RFB (Remote Frame Buffer) network metadata - query: 'tags:rfb | groupby rfb.desktop.name | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:rfb | groupby rfb.desktop.name | groupby 
source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: Signatures description: Zeek signatures query: 'event.dataset:zeek.signatures | groupby signature_id' - name: SIP description: SIP (Session Initiation Protocol) network metadata - query: 'tags:sip | groupby client.user_agent | groupby sip.method | groupby sip.uri | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:sip | groupby sip.method | groupby -sankey sip.method source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby client.user_agent | groupby sip.method | groupby sip.uri' - name: SMB_Files description: Files transferred via SMB (Server Message Block) - query: 'tags:smb_files | groupby file.action | groupby file.path | groupby file.name | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:smb_files | groupby file.action | groupby -sankey file.action source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby file.path | groupby file.name' - name: SMB_Mapping description: SMB (Server Message Block) mapping network metadata - query: 'tags:smb_mapping | groupby smb.share_type | groupby smb.path | groupby smb.service | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:smb_mapping | groupby smb.share_type | groupby -sankey smb.share_type smb.path | groupby smb.path | groupby -sankey smb.path smb.service | groupby smb.service | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - 
name: SMTP description: SMTP (Simple Mail Transfer Protocol) network metadata - query: 'tags:smtp | groupby smtp.from | groupby smtp.recipient_to | groupby -sankey source.ip destination.ip | groupby smtp.subject | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:smtp | groupby smtp.mail_from | groupby -sankey smtp.mail_from smtp.recipient_to | groupby smtp.recipient_to | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby smtp.subject | groupby destination_geo.organization_name' - name: SNMP description: SNMP (Simple Network Management Protocol) network metadat - query: 'tags:snmp | groupby snmp.community | groupby snmp.version | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:snmp | groupby snmp.community | groupby -sankey snmp.community source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby snmp.version' - name: Software description: Software seen by Zeek via network traffic - query: 'tags:software | groupby -sankey software.type source.ip | groupby software.type | groupby software.name | groupby source.ip' + query: 'tags:software | groupby software.type | groupby -sankey software.type source.ip | groupby source.ip | groupby software.name' - name: SSH description: SSH (Secure Shell) connections seen by Zeek - query: 'tags:ssh | groupby ssh.client | groupby ssh.server | groupby -sankey source.ip destination.ip | groupby ssh.direction | groupby ssh.version | groupby ssh.hassh_version | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:ssh | groupby ssh.client | groupby -sankey ssh.client source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby 
destination.ip | groupby destination.port | groupby ssh.server | groupby ssh.version | groupby ssh.hassh_version | groupby ssh.direction | groupby source_geo.organization_name | groupby destination_geo.organization_name' - name: SSL description: SSL/TLS network metadata - query: 'tags:ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey source.ip ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' + query: 'tags:ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' - name: STUN description: STUN (Session Traversal Utilities for NAT) network metadata - query: 'tags:stun* | groupby -sankey source.ip destination.ip | groupby destination.geo.country_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby event.dataset' + query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset' - name: Syslog description: Syslog logs - query: 'tags:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol' + query: 'tags:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby event.dataset' - name: TDS description: TDS (Tabular Data Stream) 
network metadata - query: 'tags:tds* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby tds.command | groupby tds.header_type | groupby tds.procedure_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby tds.query' + query: 'tags:tds* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby tds.command | groupby tds.header_type | groupby tds.procedure_name | groupby tds.query' - name: Tunnel description: Tunnels seen by Zeek - query: 'tags:tunnel | groupby -sankey source.ip destination.ip | groupby tunnel.type | groupby event.action | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name' + query: 'tags:tunnel | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby tunnel.type | groupby event.action | groupby destination.geo.country_name' - name: Weird description: Weird network traffic seen by Zeek - query: 'event.dataset:zeek.weird | groupby -sankey weird.name destination.ip | groupby weird.name | groupby weird.additional_info | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'event.dataset:zeek.weird | groupby weird.name | groupby -sankey weird.name source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: WireGuard description: WireGuard VPN network metadata - query: 'tags:wireguard | groupby -sankey source.ip destination.ip | groupby destination.geo.country_name | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:wireguard | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby 
destination.geo.country_name' - name: x509 description: x.509 certificates seen by Zeek - query: 'tags:x509 | groupby -sankey x509.certificate.key.length x509.san_dns | groupby x509.certificate.key.length | groupby x509.san_dns | groupby x509.certificate.key.type | groupby x509.certificate.subject | groupby x509.certificate.issuer' + query: 'tags:x509 | groupby x509.certificate.key.length | groupby -sankey x509.certificate.key.length x509.san_dns | groupby x509.san_dns | groupby x509.certificate.key.type | groupby x509.certificate.subject | groupby x509.certificate.issuer' - name: ICS Overview description: Overview of ICS (Industrial Control Systems) network metadata - query: 'tags:ics | groupby event.dataset | groupby -sankey source.ip destination.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby source.mac | groupby destination.mac' + query: 'tags:ics | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.mac | groupby destination.mac' - name: ICS BACnet description: BACnet (Building Automation and Control Networks) network metadata - query: 'tags:bacnet* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:bacnet* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS BSAP description: BSAP (Bristol Standard Asynchronous Protocol) network metadata - query: 'tags:bsap* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:bsap* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby 
-sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS CIP description: CIP (Common Industrial Protocol) network metadata - query: 'tags:cip* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:cip* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS COTP description: COTP (Connection Oriented Transport Protocol) network metadata - query: 'tags:cotp* | groupby -sankey source.ip destination.ip | groupby cotp.pdu.name | groupby cotp.pdu.code | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:cotp* | groupby cotp.pdu.name | groupby -sankey cotp.pdu.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby cotp.pdu.code' - name: ICS DNP3 description: DNP3 (Distributed Network Protocol) network metadata - query: 'tags:dnp3* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby dnp3.function_code | groupby dnp3.object_type | groupby dnp3.fc_request | groupby dnp3.fc_reply | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:dnp3* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dnp3.function_code | groupby dnp3.object_type | groupby dnp3.fc_request | groupby dnp3.fc_reply' - name: ICS ECAT description: ECAT (Ethernet for Control Automation Technology) network metadata - query: 'tags:ecat* | groupby -sankey event.dataset source.mac destination.mac | groupby event.dataset | groupby source.mac | groupby destination.mac | groupby ecat.command | groupby 
ecat.register.type' + query: 'tags:ecat* | groupby event.dataset | groupby -sankey event.dataset ecat.command | groupby ecat.command | groupby -sankey ecat.command source.mac | groupby source.mac | groupby -sankey source.mac destination.mac | groupby destination.mac | groupby ecat.register.type' - name: ICS ENIP description: ENIP (Ethernet Industrial Protocol) network metadata - query: 'tags:enip* | groupby -sankey source.ip destination.ip | groupby enip.command | groupby enip.status_code | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:enip* | groupby enip.command | groupby -sankey enip.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby enip.status_code' - name: ICS Modbus description: Modbus network metadata - query: 'tags:modbus* | groupby -sankey event.dataset modbus.function | groupby event.dataset | groupby modbus.function | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:modbus* | groupby event.dataset | groupby -sankey event.dataset modbus.function | groupby modbus.function | groupby -sankey modbus.function source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS OPC UA description: OPC UA (Unified Architecture) network metadata - query: 'tags:opcua* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:opcua* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS Profinet description: Profinet (Process Field Network) network metadata - query: 'tags:profinet* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | 
groupby destination.ip | groupby destination.port' + query: 'tags:profinet* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: ICS S7 description: S7 (Siemens) network metadata - query: 'tags:s7* | groupby -sankey event.dataset source.ip destination.ip | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'tags:s7* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: Firewall description: Firewall logs - query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' + query: 'observer.type:firewall | groupby event.action | groupby -sankey event.action observer.ingress.interface.name | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Firewall Auth description: Firewall authentication logs query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message' - name: VLAN description: VLAN (Virtual Local Area Network) tagged logs - query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' + query: '* AND 
_exists_:network.vlan.id | groupby network.vlan.id | groupby -sankey network.vlan.id source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' - name: GeoIP - Destination Countries description: GeoIP tagged logs visualized by destination countries query: '* AND _exists_:destination.geo.country_name | groupby destination.geo.country_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby event.dataset | groupby event.module' From 7c4ea8a58e4ef2c6fef32cd32cba2386d9418fea Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 26 Mar 2024 07:39:39 -0400 Subject: [PATCH 261/777] Add Detections SOC Config --- salt/soc/soc_soc.yaml | 82 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 74 insertions(+), 8 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index cb939f758..f1969b487 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -78,14 +78,38 @@ soc: advanced: True modules: elastalertengine: - sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone. (future use, not yet complete)' - global: True - advanced: False - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false. (future use, not yet complete)' + allowRegex: + description: 'Regex used to filter imported Sigma rules. 
Deny regex takes precedence over the Allow regex setting.' global: True advanced: True + helpLink: sigma.html + denyRegex: + description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: sigma.html + communityRulesImportFrequencySeconds: + description: 'How often to check for new Sigma rules (in seconds). This applies to both Community Rule Packages and any configured Git repos.' + global: True + advanced: True + helpLink: sigma.html + rulesRepos: + description: 'Custom git repos to pull Sigma rules from. License field is required, folder is optional.' + global: True + advanced: True + multiline: True + forcedType: "[]string" + helpLink: sigma.html + sigmaRulePackages: + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + global: True + advanced: False + helpLink: sigma.html + autoUpdateEnabled: + description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false.' + global: True + advanced: True + helpLink: sigma.html elastic: index: description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records. @@ -148,10 +172,52 @@ soc: global: True advanced: True strelkaengine: - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false. (future use, not yet complete)' + allowRegex: + description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' 
global: True advanced: True + helpLink: yara.html + autoUpdateEnabled: + description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.' + global: True + advanced: True + denyRegex: + description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: yara.html + communityRulesImportFrequencySeconds: + description: 'How often to check for new Yara rules (in seconds). This applies to both Community Rules and any configured Git repos.' + global: True + advanced: True + helpLink: yara.html + rulesRepos: + description: 'Custom git repos to pull Sigma rules from. License field is required' + global: True + advanced: True + multiline: True + forcedType: "[]string" + helpLink: yara.html + suricataengine: + allowRegex: + description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: suricata.html + autoUpdateEnabled: + description: 'Set to true to enable automatic Internet-connected updates of the Suricata rulesets. If this is an Airgap system, this setting will be overridden and set to false.' + global: True + advanced: True + denyRegex: + description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' + global: True + advanced: True + helpLink: suricata.html + communityRulesImportFrequencySeconds: + description: 'How often to check for new Suricata rules (in seconds).' + global: True + advanced: True + helpLink: suricata.html client: enableReverseLookup: description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. 
From cc0f4847ba20d773c12bbed82530eb9c1f11d95a Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 26 Mar 2024 08:10:57 -0400 Subject: [PATCH 262/777] Casing and validation --- salt/soc/soc_soc.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index f1969b487..1456c71bf 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -94,10 +94,11 @@ soc: advanced: True helpLink: sigma.html rulesRepos: - description: 'Custom git repos to pull Sigma rules from. License field is required, folder is optional.' + description: 'Custom Git repos to pull Sigma rules from. License field is required, folder is optional.' global: True advanced: True multiline: True + syntax: json forcedType: "[]string" helpLink: sigma.html sigmaRulePackages: @@ -192,11 +193,12 @@ soc: advanced: True helpLink: yara.html rulesRepos: - description: 'Custom git repos to pull Sigma rules from. License field is required' + description: 'Custom Git repos to pull Sigma rules from. License field is required' global: True advanced: True multiline: True forcedType: "[]string" + syntax: json helpLink: yara.html suricataengine: allowRegex: From bbcd3116f707a995a9c581f933ead92f98aa8cc1 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 26 Mar 2024 09:31:46 -0400 Subject: [PATCH 263/777] Fixes --- salt/soc/soc_soc.yaml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 1456c71bf..eae52e31b 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -97,9 +97,7 @@ soc: description: 'Custom Git repos to pull Sigma rules from. License field is required, folder is optional.' global: True advanced: True - multiline: True - syntax: json - forcedType: "[]string" + forcedType: "[]{}" helpLink: sigma.html sigmaRulePackages: description: 'Defines the Sigma Community Ruleset you want to run. 
One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' @@ -193,12 +191,10 @@ soc: advanced: True helpLink: yara.html rulesRepos: - description: 'Custom Git repos to pull Sigma rules from. License field is required' + description: 'Custom Git repos to pull Yara rules from. License field is required' global: True advanced: True - multiline: True - forcedType: "[]string" - syntax: json + forcedType: "[]{}" helpLink: yara.html suricataengine: allowRegex: From e2caf4668e5298b92b6b91fe7185c4e15df736d9 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 26 Mar 2024 16:08:41 -0400 Subject: [PATCH 264/777] FEATURE: Add Events table columns for event.module elastic_agent #12666 --- salt/soc/defaults.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 70db82fc6..861f6b02c 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1176,6 +1176,10 @@ soc: - logdata.USERNAME - logdata.USERAGENT - event.dataset + ':elastic_agent:': + - soc_timestamp + - event.dataset + - message server: bindAddress: 0.0.0.0:9822 baseUrl: / From d57f7730724e9134e5396a5f0d1da094be7bae98 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 27 Mar 2024 09:36:42 -0400 Subject: [PATCH 265/777] Fix regex to allow ipv6 in bpfs --- salt/suricata/soc_suricata.yaml | 2 +- salt/zeek/soc_zeek.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index c9ba80f01..b0a864329 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -145,7 +145,7 @@ suricata: address-groups: HOME_NET: description: List of hosts or networks. 
- regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ + regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. 
helpLink: suricata.html EXTERNAL_NET: diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index c69ce5ea1..bd5d88116 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -24,7 +24,7 @@ zeek: advanced: False helpLink: zeek.html multiline: True - regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ + regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. 
node: lb_procs: From b571eeb8e6425b197b0b2245d3dfb367b1bb5f93 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 27 Mar 2024 14:58:16 -0400 Subject: [PATCH 266/777] Initial cut of .70 soup changes --- salt/manager/tools/sbin/soup | 74 ++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a585f877c..87d88a57e 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -357,6 +357,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40 [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50 [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60 + [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70 true } @@ -373,6 +374,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40 [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 + [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 true } @@ -435,6 +437,11 @@ post_to_2.4.60() { POSTVERSION=2.4.60 } +post_to_2.4.70() { + echo "Nothing to apply" + POSTVERSION=2.4.70 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." @@ -574,6 +581,73 @@ up_to_2.4.60() { INSTALLEDVERSION=2.4.60 } +up_to_2.4.70() { + # Start SOC Detections migration + mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert} + + # Remove cronjobs + crontab -l | grep -v 'so-playbook-sync_cron' | crontab - + crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab - + + # Check for active Elastalert rules + active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l) + + if [[ "$active_rules_count" -gt 0 ]]; then + # Prompt the user to AGREE if active Elastalert rules found + echo + echo "$active_rules_count Active Elastalert/Playbook rules found." 
+ echo "In preparation for the new Detections module, they will be backed up and then disabled." + echo + echo "If you would like to proceed, then type AGREE and press ENTER." + echo + # Read user input + read INPUT + if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi + + echo "Backing up the Elastalert rules..." + rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/ + + # Verify that rsync completed successfully + if [[ $? -eq 0 ]]; then + # Delete the Elastlaert rules + rm -f /opt/so/rules/elastalert/playbook/*.yaml + echo "Active Elastalert rules have been backed up." + else + echo "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up." + exit 1 + fi + fi + + echo + echo "Exporting Sigma rules from Playbook..." + MYSQLPW=$(lookup_pillar_secret mysql) + + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do + echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml" + done + + echo + echo "Exporting Sigma Filters from Playbook..." + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt + + echo + echo "Backing up Playbook database..." + docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" + docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql + + echo + echo "Stopping Playbook services..." + so-playbook-stop + so-mysql-stop + so-soctopus-stop + + # What about cleaning up various so-utilities like so-playbook-restart? + echo + echo "Playbook Migration is complete...." 
+ + INSTALLEDVERSION=2.4.70 +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap From ba262ee01a9fada97e21fe82019598ced83529fa Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 27 Mar 2024 15:43:25 -0400 Subject: [PATCH 267/777] Check to see if Playbook is enabled --- salt/manager/tools/sbin/soup | 85 +++++++++++++++++++----------------- 1 file changed, 44 insertions(+), 41 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 87d88a57e..62a579e18 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -589,52 +589,55 @@ up_to_2.4.70() { crontab -l | grep -v 'so-playbook-sync_cron' | crontab - crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab - - # Check for active Elastalert rules - active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l) + if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then + + # Check for active Elastalert rules + active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l) - if [[ "$active_rules_count" -gt 0 ]]; then - # Prompt the user to AGREE if active Elastalert rules found - echo - echo "$active_rules_count Active Elastalert/Playbook rules found." - echo "In preparation for the new Detections module, they will be backed up and then disabled." - echo - echo "If you would like to proceed, then type AGREE and press ENTER." - echo - # Read user input - read INPUT - if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi + if [[ "$active_rules_count" -gt 0 ]]; then + # Prompt the user to AGREE if active Elastalert rules found + echo + echo "$active_rules_count Active Elastalert/Playbook rules found." + echo "In preparation for the new Detections module, they will be backed up and then disabled." + echo + echo "If you would like to proceed, then type AGREE and press ENTER." 
+ echo + # Read user input + read INPUT + if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi - echo "Backing up the Elastalert rules..." - rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/ + echo "Backing up the Elastalert rules..." + rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/ - # Verify that rsync completed successfully - if [[ $? -eq 0 ]]; then - # Delete the Elastlaert rules - rm -f /opt/so/rules/elastalert/playbook/*.yaml - echo "Active Elastalert rules have been backed up." - else - echo "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up." - exit 1 - fi + # Verify that rsync completed successfully + if [[ $? -eq 0 ]]; then + # Delete the Elastlaert rules + rm -f /opt/so/rules/elastalert/playbook/*.yaml + echo "Active Elastalert rules have been backed up." + else + echo "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up." + exit 1 + fi + fi + + echo + echo "Exporting Sigma rules from Playbook..." + MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls) + + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do + echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml" + done + + echo + echo "Exporting Sigma Filters from Playbook..." + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt + + echo + echo "Backing up Playbook database..." 
+ docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" + docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql fi - echo - echo "Exporting Sigma rules from Playbook..." - MYSQLPW=$(lookup_pillar_secret mysql) - - docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do - echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml" - done - - echo - echo "Exporting Sigma Filters from Playbook..." - docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt - - echo - echo "Backing up Playbook database..." - docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" - docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql - echo echo "Stopping Playbook services..." so-playbook-stop From ce0c9f846db5d74afa54727d097ecbb8a24fefb5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 27 Mar 2024 16:13:52 -0400 Subject: [PATCH 268/777] Remove containers from so-status --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 62a579e18..069a4f345 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -643,6 +643,8 @@ up_to_2.4.70() { so-playbook-stop so-mysql-stop so-soctopus-stop + sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf + # What about cleaning up various so-utilities like so-playbook-restart? 
echo From 216b8c01bf448db9b73ec6bbf1dc581da45f3617 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 28 Mar 2024 09:31:39 -0400 Subject: [PATCH 269/777] disregard errors that in removed applications that occurred before the upgrade --- salt/common/tools/sbin/so-log-check | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index b5f9d77cb..a4b25f0f3 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -207,6 +207,9 @@ RESULT=0 CONTAINER_IDS=$(docker ps -q) exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary exclude_container so-idstools # ignore due to known issues and noisy logging +exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers +exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers +exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers for container_id in $CONTAINER_IDS; do container_name=$(docker ps --format json | jq ". 
| select(.ID==\"$container_id\")|.Names") @@ -224,10 +227,12 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet exclude_log "spool" # disregard zeek analyze logs as this is data specific exclude_log "import" # disregard imported test data the contains error strings exclude_log "update.log" # ignore playbook updates due to several known issues -exclude_log "playbook.log" # ignore due to several playbook known issues exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed exclude_log "cron-close.log" # ignore since Curator has been removed -exclude_log "curator.log" # ignore since Curator has been removed +exclude_log "curator.log" # ignore since Curator has been removed +exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk +exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk +exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From d2c9e0ea4aaa7e7ff7e0aafdf9fa25cbc681eda5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 28 Mar 2024 13:04:48 -0400 Subject: [PATCH 270/777] Cleanup --- salt/manager/tools/sbin/soup | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 069a4f345..822fa05d2 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -604,7 +604,7 @@ up_to_2.4.70() { echo # Read user input read INPUT - if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi + if [ "${INPUT^^}" != 'AGREE' ]; then fail "SOUP canceled."; fi echo "Backing up the Elastalert rules..." rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/ @@ -615,8 +615,7 @@ up_to_2.4.70() { rm -f /opt/so/rules/elastalert/playbook/*.yaml echo "Active Elastalert rules have been backed up." 
else - echo "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up." - exit 1 + fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up." fi fi @@ -639,14 +638,13 @@ up_to_2.4.70() { fi echo - echo "Stopping Playbook services..." - so-playbook-stop - so-mysql-stop - so-soctopus-stop + echo "Stopping Playbook services & cleaning up..." + docker stop so-playbook 2>/dev/null + docker stop so-mysql 2>/dev/null + docker stop so-soctopus 2>/dev/null sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf + rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-* - - # What about cleaning up various so-utilities like so-playbook-restart? echo echo "Playbook Migration is complete...." From 9c5ba92589e583182373acff0a0eefd00573fea2 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 28 Mar 2024 13:23:40 -0400 Subject: [PATCH 271/777] Check if container is running first --- salt/manager/tools/sbin/soup | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 822fa05d2..4020ec531 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -639,9 +639,11 @@ up_to_2.4.70() { echo echo "Stopping Playbook services & cleaning up..." 
- docker stop so-playbook 2>/dev/null - docker stop so-mysql 2>/dev/null - docker stop so-soctopus 2>/dev/null + for container in so-playbook so-mysql so-soctopus; do + if [ -n "$(docker ps -q -f name=^${container}$)" ]; then + docker stop $container + fi + done sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-* From 32b8649c77e0698c987b0b4e01fdeb9dfcb406ad Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 28 Mar 2024 14:31:02 -0400 Subject: [PATCH 272/777] Add more error checking --- salt/manager/tools/sbin/soup | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 4020ec531..d5abda783 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -625,16 +625,16 @@ up_to_2.4.70() { docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml" - done + done || fail "Failed to export Sigma rules..." echo echo "Exporting Sigma Filters from Playbook..." - docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt + docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters." 
echo echo "Backing up Playbook database..." - docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" - docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql + docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database." + docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database." fi echo From 102c3271d1480a35d0dc15866769058e9741de71 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 29 Mar 2024 12:04:47 -0400 Subject: [PATCH 273/777] FEATURE: Add process.command_line to Process Info and Process Ancestry dashboards #12694 --- salt/soc/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 861f6b02c..987011c99 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -70,13 +70,13 @@ soc: icon: fa-person-running target: '' links: - - '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' - name: actionProcessAncestors description: actionProcessAncestorsHelp icon: fa-people-roof target: '' links: 
- - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' + - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' eventFields: default: - soc_timestamp From 000d15a53c60bf923b826602fcb93f3de7ec722c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 29 Mar 2024 13:56:01 -0400 Subject: [PATCH 274/777] Kismet integration: TODO Elasticsearch mappings Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticfleet/defaults.yaml | 5 + .../files/integrations-optional/kismet.json | 36 ++++ salt/elasticfleet/soc_elasticfleet.yaml | 26 +++ salt/elasticsearch/defaults.yaml | 43 +++++ salt/elasticsearch/files/ingest/kismet.ad_hoc | 10 ++ salt/elasticsearch/files/ingest/kismet.ap | 50 ++++++ .../elasticsearch/files/ingest/kismet.bridged | 16 ++ salt/elasticsearch/files/ingest/kismet.client | 29 ++++ salt/elasticsearch/files/ingest/kismet.common | 158 ++++++++++++++++++ salt/elasticsearch/files/ingest/kismet.device | 9 + 
salt/elasticsearch/files/ingest/kismet.seenby | 52 ++++++ salt/elasticsearch/files/ingest/kismet.wds | 10 ++ salt/elasticsearch/files/ingest/kismet.wds_ap | 22 +++ salt/elasticsearch/soc_elasticsearch.yaml | 1 + .../templates/component/ecs/kismet.json | 16 ++ 15 files changed, 483 insertions(+) create mode 100644 salt/elasticfleet/files/integrations-optional/kismet.json create mode 100644 salt/elasticsearch/files/ingest/kismet.ad_hoc create mode 100644 salt/elasticsearch/files/ingest/kismet.ap create mode 100644 salt/elasticsearch/files/ingest/kismet.bridged create mode 100644 salt/elasticsearch/files/ingest/kismet.client create mode 100644 salt/elasticsearch/files/ingest/kismet.common create mode 100644 salt/elasticsearch/files/ingest/kismet.device create mode 100644 salt/elasticsearch/files/ingest/kismet.seenby create mode 100644 salt/elasticsearch/files/ingest/kismet.wds create mode 100644 salt/elasticsearch/files/ingest/kismet.wds_ap create mode 100644 salt/elasticsearch/templates/component/ecs/kismet.json diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 7b2d9d6a3..2af7e7532 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -118,3 +118,8 @@ elasticfleet: base_url: https://api.platform.sublimesecurity.com poll_interval: 5m limit: 100 + kismet: + base_url: http://localhost:2501 + poll_interval: 1m + api_key: + enabled_nodes: [] \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations-optional/kismet.json b/salt/elasticfleet/files/integrations-optional/kismet.json new file mode 100644 index 000000000..9a333f31c --- /dev/null +++ b/salt/elasticfleet/files/integrations-optional/kismet.json @@ -0,0 +1,36 @@ +{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{% raw %} +{ + "package": { + "name": "httpjson", + "version": "" + }, + "name": "kismet-logs", + "namespace": "so", + "description": "Kismet Logs", + "policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw 
%}", + "inputs": { + "generic-httpjson": { + "enabled": true, + "streams": { + "httpjson.generic": { + "enabled": true, + "vars": { + "data_stream.dataset": "kismet", + "request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson", + "request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}", + "request_method": "GET", + "request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'", + "request_redirect_headers_ban_list": [], + "oauth_scopes": [], + "processors": "", + "tags": [], + "pipeline": "kismet.common" + } + } + } + } + }, + "force": true +} +{% endraw %} \ No newline at end of file diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 9a0cd0a91..206febcd7 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -79,3 +79,29 @@ elasticfleet: helpLink: elastic-fleet.html advanced: True forcedType: int + kismet: + base_url: + description: Base URL for Kismet. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: string + poll_interval: + description: Poll interval for wireless device data from Kismet. Integration is currently configured to report devices seen as active by any Kismet sensor within the last 600 seconds of polling. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: string + api_key: + description: API key for Kismet. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: string + sensitive: True + enabled_nodes: + description: Fleet nodes with the Kismet integration enabled. Enter one per line. 
+ global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: "[]string" diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index c70b0419a..048dd0c7f 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -10491,6 +10491,49 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-kismet: + index_sorting: false + index_template: + composed_of: + - kismet-mappings + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-kismet-so* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-kismet-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logstash: index_sorting: false index_template: diff --git a/salt/elasticsearch/files/ingest/kismet.ad_hoc b/salt/elasticsearch/files/ingest/kismet.ad_hoc new file mode 100644 index 000000000..8cbc9cd2b --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.ad_hoc @@ -0,0 +1,10 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "wireless.bssid" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.ap b/salt/elasticsearch/files/ingest/kismet.ap new file mode 100644 index 000000000..1b8cbb80e --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.ap @@ -0,0 +1,50 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked", + "target_field": "wireless.ssid_cloaked", + "if": 
"ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid", + "target_field": "wireless.ssid", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null" + } + }, + { + "set": { + "field": "wireless.ssid", + "value": "Hidden", + "if": "ctx?.wireless?.ssid_cloaked != null && ctx?.wireless?.ssid_cloaked == 1" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc", + "target_field": "wireless.channel_utilization", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_bssid", + "target_field": "wireless.bssid" + } + }, + { + "foreach": { + "field": "message2.dot11_device.dot11_device_associated_client_map", + "processor": { + "append": { + "field": "wireless.associated_clients", + "value": "{{_ingest._key}}" + } + }, + "if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.bridged b/salt/elasticsearch/files/ingest/kismet.bridged new file mode 100644 index 000000000..5eee3b78c --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.bridged @@ -0,0 +1,16 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "client.mac" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_bssid", + "target_field": "wireless.bssid" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.client b/salt/elasticsearch/files/ingest/kismet.client new file mode 100644 
index 000000000..8b3d3069b --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.client @@ -0,0 +1,29 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "client.mac" + } + }, + { + "rename": { + "field": "message2.dot11_device.dot11_device_last_bssid", + "target_field": "wireless.last_connected_bssid", + "if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null" + } + }, + { + "foreach": { + "field": "message2.dot11_device.dot11_device_client_map", + "processor": { + "append": { + "field": "wireless.known_connected_bssid", + "value": "{{_ingest._key}}" + } + }, + "if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.common b/salt/elasticsearch/files/ingest/kismet.common new file mode 100644 index 000000000..95eb29b73 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.common @@ -0,0 +1,158 @@ +{ + "processors": [ + { + "json": { + "field": "message", + "target_field": "message2" + } + }, + { + "date": { + "field": "message2.kismet_device_base_mod_time", + "formats": [ + "epoch_second" + ], + "target_field": "@timestamp" + } + }, + { + "set": { + "field": "event.category", + "value": "network" + } + }, + { + "dissect": { + "field": "message2.kismet_device_base_type", + "pattern": "%{wifi} %{device_type}" + } + }, + { + "lowercase": { + "field": "device_type" + } + }, + { + "set": { + "field": "event.dataset", + "value": "kismet.{{device_type}}" + } + }, + { + "set": { + "field": "event.dataset", + "value": "kismet.wds_ap", + "if": "ctx?.device_type == 'wds ap'" + } + }, + { + "set": { + "field": "event.dataset", + "value": "kismet.ad_hoc", + "if": "ctx?.device_type == 'ad-hoc'" + } + }, + { + "set": { + "field": "event.module", + "value": "kismet" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_packets_tx_total", + "target_field": "source.packets" + } + }, + { 
+ "rename": { + "field": "message2.kismet_device_base_num_alerts", + "target_field": "kismet.alerts.count" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_channel", + "target_field": "wireless.channel", + "if": "ctx?.message2?.kismet_device_base_channel != ''" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_frequency", + "target_field": "wireless.frequency", + "if": "ctx?.message2?.kismet_device_base_frequency != 0" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_last_time", + "target_field": "kismet.last_seen" + } + }, + { + "date": { + "field": "kismet.last_seen", + "formats": [ + "epoch_second" + ], + "target_field": "kismet.last_seen" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_first_time", + "target_field": "kismet.first_seen" + } + }, + { + "date": { + "field": "kismet.first_seen", + "formats": [ + "epoch_second" + ], + "target_field": "kismet.first_seen" + } + }, + { + "rename": { + "field": "message2.kismet_device_base_seenby", + "target_field": "kismet.seenby" + } + }, + { + "foreach": { + "field": "kismet.seenby", + "processor": { + "pipeline": { + "name": "kismet.seenby" + } + } + } + }, + { + "rename": { + "field": "message2.kismet_device_base_manuf", + "target_field": "device.manufacturer" + } + }, + { + "pipeline": { + "name": "{{event.dataset}}" + } + }, + { + "remove": { + "field": [ + "message2", + "message", + "device_type", + "wifi", + "agent", + "host" + ], + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.device b/salt/elasticsearch/files/ingest/kismet.device new file mode 100644 index 000000000..49d0c7ad7 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.device @@ -0,0 +1,9 @@ +{ + "processors": [ + { + "pipeline": { + "name": "kismet.client" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.seenby 
b/salt/elasticsearch/files/ingest/kismet.seenby new file mode 100644 index 000000000..d41220d76 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.seenby @@ -0,0 +1,52 @@ +{ + "processors": [ + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_num_packets", + "target_field": "_ingest._value.packets_seen", + "ignore_missing": true + } + }, + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_uuid", + "target_field": "_ingest._value.serial_number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_first_time", + "target_field": "_ingest._value.first_seen", + "ignore_missing": true + } + }, + { + "rename": { + "field": "_ingest._value.kismet_common_seenby_last_time", + "target_field": "_ingest._value.last_seen", + "ignore_missing": true + } + }, + { + "date": { + "field": "_ingest._value.first_seen", + "formats": [ + "epoch_second" + ], + "target_field": "_ingest._value.first_seen", + "ignore_failure": true + } + }, + { + "date": { + "field": "_ingest._value.last_seen", + "formats": [ + "epoch_second" + ], + "target_field": "_ingest._value.last_seen", + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.wds b/salt/elasticsearch/files/ingest/kismet.wds new file mode 100644 index 000000000..1e426c463 --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.wds @@ -0,0 +1,10 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_macaddr", + "target_field": "client.mac" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/kismet.wds_ap b/salt/elasticsearch/files/ingest/kismet.wds_ap new file mode 100644 index 000000000..7f43d43fd --- /dev/null +++ b/salt/elasticsearch/files/ingest/kismet.wds_ap @@ -0,0 +1,22 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.kismet_device_base_commonname", + "target_field": "wireless.bssid" + } + }, + { + 
"foreach": { + "field": "message2.dot11_device.dot11_device_associated_client_map", + "processor": { + "append": { + "field": "wireless.associated_clients", + "value": "{{_ingest._key}}" + } + }, + "if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index e68d0441b..c684c6154 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -511,6 +511,7 @@ elasticsearch: so-suricata: *indexSettings so-import: *indexSettings so-kratos: *indexSettings + so-kismet: *indexSettings so-logstash: *indexSettings so-redis: *indexSettings so-strelka: *indexSettings diff --git a/salt/elasticsearch/templates/component/ecs/kismet.json b/salt/elasticsearch/templates/component/ecs/kismet.json new file mode 100644 index 000000000..d388b7127 --- /dev/null +++ b/salt/elasticsearch/templates/component/ecs/kismet.json @@ -0,0 +1,16 @@ +{ + "_meta": { + "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html", + "ecs_version": "1.12.2" + }, + "template": { + "mappings": { + "properties": { + "kismet_mapping_placeholder": { + "type": "keyword", + "ignore_above": 1024 + } + } + } + } +} \ No newline at end of file From e747a4e3fed1a42e5ec41be0d3f17cc6aa89d26b Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 29 Mar 2024 12:25:03 -0600 Subject: [PATCH 275/777] New Settings for Manual Sync in Detections --- salt/soc/defaults.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 987011c99..4ecf1713d 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1993,6 +1993,13 @@ soc: mostRecentlyUsedLimit: 5 safeStringMaxLength: 100 queryBaseFilter: '_index:"*:so-detection" AND so_kind:detection' + presets: + manualSync: + customEnabled:false + labels: + - Suricata + - Strelka + - ElastAlert 
eventFields: default: - so_detection.title From 0c7ba6286790ccc0cb7ab822fa71d6914585642b Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 29 Mar 2024 14:44:29 -0400 Subject: [PATCH 276/777] FEATURE: Add Events table columns for zeek ssl and suricata ssl #12697 --- salt/soc/defaults.yaml | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 987011c99..d0e769620 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -457,7 +457,7 @@ soc: - ssh.server - log.id.uid - event.dataset - '::ssl': + ':suricata:ssl': - soc_timestamp - source.ip - source.port @@ -465,10 +465,30 @@ soc: - destination.port - ssl.server_name - ssl.certificate.subject + - ssl.version + - log.id.uid + - event.dataset + ':zeek:ssl': + - soc_timestamp + - source.ip + - source.port + - destination.ip + - destination.port + - ssl.server_name - ssl.validation_status - ssl.version - log.id.uid - event.dataset + '::ssl': + - soc_timestamp + - source.ip + - source.port + - destination.ip + - destination.port + - ssl.server_name + - ssl.version + - log.id.uid + - event.dataset ':zeek:syslog': - soc_timestamp - source.ip From b64ed5535e06690e31e83fb232848122b509256c Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 29 Mar 2024 15:29:38 -0400 Subject: [PATCH 277/777] FEATURE: Add individual dashboards for Zeek SSL and Suricata SSL logs #12699 --- salt/soc/defaults.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 153937ad1..6fbbebd76 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1760,7 +1760,13 @@ soc: query: 'tags:ssh | groupby ssh.client | groupby -sankey ssh.client source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ssh.server | groupby ssh.version | groupby ssh.hassh_version | groupby ssh.direction | groupby 
source_geo.organization_name | groupby destination_geo.organization_name' - name: SSL description: SSL/TLS network metadata - query: 'tags:ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' + query: 'tags:ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + - name: SSL - Suricata + description: SSL/TLS network metadata from Suricata + query: 'event.dataset:suricata.ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' + - name: SSL - Zeek + description: SSL/TLS network metadata from Zeek + query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: STUN description: STUN (Session Traversal Utilities for NAT) network metadata query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset' From e5a3a54aea80421b027afbed99eeed3d8382e557 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 29 Mar 2024 14:31:43 -0600 Subject: [PATCH 278/777] Proper YAML --- salt/soc/defaults.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 6fbbebd76..9ec22b180 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2021,11 +2021,11 @@ soc: queryBaseFilter: '_index:"*:so-detection" AND so_kind:detection' presets: manualSync: - customEnabled:false + customEnabled: false labels: - - Suricata - - Strelka - - ElastAlert + - Suricata + - Strelka + - ElastAlert eventFields: default: - so_detection.title From 3aea2dec85745aca62f89a53cb6f8444629dce32 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 1 Apr 2024 09:50:18 -0400 Subject: [PATCH 279/777] analytics --- salt/manager/tools/sbin/so-yaml.py | 82 +++++++++++- salt/manager/tools/sbin/so-yaml_test.py | 159 ++++++++++++++++++++++++ salt/manager/tools/sbin/soup | 42 +++++++ salt/soc/config.sls | 9 ++ salt/soc/defaults.yaml | 1 + salt/soc/enabled.sls | 5 + salt/soc/files/soc/analytics.js | 5 + salt/soc/files/soc/motd.md | 4 + salt/soc/soc_soc.yaml | 5 + setup/so-functions | 4 + setup/so-setup | 5 + setup/so-whiptail | 20 +++ 12 files changed, 339 insertions(+), 2 deletions(-) create mode 100644 salt/soc/files/soc/analytics.js diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 41cab0b23..5427a2e48 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -17,13 +17,16 @@ def showUsage(args): print('Usage: {} [ARGS...]'.format(sys.argv[0])) print(' General commands:') print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.') + print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.') print(' remove - Removes a yaml key, if it exists. Requires KEY arg.') + print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.') print(' help - Prints this usage information.') print('') print(' Where:') print(' YAML_FILE - Path to the file that will be modified. 
Ex: /opt/so/conf/service/conf.yaml') print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2') - print(' LISTITEM - Item to add to the list.') + print(' VALUE - Value to set for a given key') + print(' LISTITEM - Item to append to a given key\'s list value') sys.exit(1) @@ -37,6 +40,7 @@ def writeYaml(filename, content): file = open(filename, "w") return yaml.dump(content, file) + def appendItem(content, key, listItem): pieces = key.split(".", 1) if len(pieces) > 1: @@ -51,6 +55,30 @@ def appendItem(content, key, listItem): print("The key provided does not exist. No action was taken on the file.") return 1 + +def convertType(value): + if len(value) > 0 and (not value.startswith("0") or len(value) == 1): + if "." in value: + try: + value = float(value) + return value + except ValueError: + pass + + try: + value = int(value) + return value + except ValueError: + pass + + lowered_value = value.lower() + if lowered_value == "false": + return False + elif lowered_value == "true": + return True + return value + + def append(args): if len(args) != 3: print('Missing filename, key arg, or list item to append', file=sys.stderr) @@ -62,11 +90,41 @@ def append(args): listItem = args[2] content = loadYaml(filename) - appendItem(content, key, listItem) + appendItem(content, key, convertType(listItem)) writeYaml(filename, content) return 0 + +def addKey(content, key, value): + pieces = key.split(".", 1) + if len(pieces) > 1: + if not pieces[0] in content: + content[pieces[0]] = {} + addKey(content[pieces[0]], pieces[1], value) + elif key in content: + raise KeyError("key already exists") + else: + content[key] = value + + +def add(args): + if len(args) != 3: + print('Missing filename, key arg, and/or value', file=sys.stderr) + showUsage(None) + return + + filename = args[0] + key = args[1] + value = args[2] + + content = loadYaml(filename) + addKey(content, key, convertType(value)) + writeYaml(filename, content) + + return 0 + + def 
removeKey(content, key): pieces = key.split(".", 1) if len(pieces) > 1: @@ -91,6 +149,24 @@ def remove(args): return 0 +def replace(args): + if len(args) != 3: + print('Missing filename, key arg, and/or value', file=sys.stderr) + showUsage(None) + return + + filename = args[0] + key = args[1] + value = args[2] + + content = loadYaml(filename) + removeKey(content, key) + addKey(content, key, convertType(value)) + writeYaml(filename, content) + + return 0 + + def main(): args = sys.argv[1:] @@ -100,8 +176,10 @@ def main(): commands = { "help": showUsage, + "add": add, "append": append, "remove": remove, + "replace": replace, } code = 1 diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index 488877ea1..7effabac9 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -42,6 +42,14 @@ class TestRemove(unittest.TestCase): sysmock.assert_called() self.assertIn(mock_stdout.getvalue(), "Usage:") + def test_remove_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.remove(["file"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + def test_remove(self): filename = "/tmp/so-yaml_test-remove.yaml" file = open(filename, "w") @@ -106,6 +114,14 @@ class TestRemove(unittest.TestCase): sysmock.assert_called_once_with(1) self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + def test_append_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.append(["file", "key"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n") + def test_append(self): filename = "/tmp/so-yaml_test-remove.yaml" file = open(filename, "w") @@ -201,3 
+217,146 @@ class TestRemove(unittest.TestCase): soyaml.main() sysmock.assert_called() self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n") + + def test_add_key(self): + content = {} + soyaml.addKey(content, "foo", 123) + self.assertEqual(content, {"foo": 123}) + + try: + soyaml.addKey(content, "foo", "bar") + self.assertFail("expected key error since key already exists") + except KeyError: + pass + + try: + soyaml.addKey(content, "foo.bar", 123) + self.assertFail("expected type error since key parent value is not a map") + except TypeError: + pass + + content = {} + soyaml.addKey(content, "foo", "bar") + self.assertEqual(content, {"foo": "bar"}) + + soyaml.addKey(content, "badda.badda", "boom") + self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}}) + + def test_add_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.add(["file", "key"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + + def test_add(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}") + file.close() + + soyaml.add([filename, "key4", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n" + self.assertEqual(actual, expected) + + def test_add_nested(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.add([filename, "key1.child3", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n child3: 
d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_add_nested_deep(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.add([filename, "key1.child2.deep2", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_replace_missing_arg(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.replace(["file", "key"]) + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + + def test_replace(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}") + file.close() + + soyaml.replace([filename, "key2", True]) + + file = open(filename, "r") + actual = file.read() + file.close() + expected = "key1:\n child1: 123\n child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n" + self.assertEqual(actual, expected) + + def test_replace_nested(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}") + file.close() + + soyaml.replace([filename, "key1.child2", "d"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_replace_nested_deep(self): + filename = "/tmp/so-yaml_test-add.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + 
soyaml.replace([filename, "key1.child2.deep1", 46]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key1:\n child1: 123\n child2:\n deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n" + self.assertEqual(actual, expected) + + def test_convert(self): + self.assertEqual(soyaml.convertType("foo"), "foo") + self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar") + self.assertEqual(soyaml.convertType("123"), 123) + self.assertEqual(soyaml.convertType("0"), 0) + self.assertEqual(soyaml.convertType("00"), "00") + self.assertEqual(soyaml.convertType("0123"), "0123") + self.assertEqual(soyaml.convertType("123.456"), 123.456) + self.assertEqual(soyaml.convertType("0123.456"), "0123.456") + self.assertEqual(soyaml.convertType("true"), True) + self.assertEqual(soyaml.convertType("TRUE"), True) + self.assertEqual(soyaml.convertType("false"), False) + self.assertEqual(soyaml.convertType("FALSE"), False) + self.assertEqual(soyaml.convertType(""), "") diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a585f877c..db5335a7a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -357,6 +357,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40 [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50 [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60 + [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70 true } @@ -373,6 +374,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40 [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 + [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 true } @@ -435,6 +437,11 @@ post_to_2.4.60() { POSTVERSION=2.4.60 } +post_to_2.4.70() { + echo "Nothing to apply" + POSTVERSION=2.4.70 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." 
@@ -574,6 +581,41 @@ up_to_2.4.60() { INSTALLEDVERSION=2.4.60 } +up_to_2.4.70() { + if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then + cat << ASSIST_EOF + +--------------- SOC Telemetry --------------- + +The Security Onion development team could use your help! Enabling SOC +Telemetry will help the team understand which UI features are being +used and enables informed prioritization of future development. + +Adjust this setting at anytime via the SOC Configuration screen. + +For more information visit https://docs.securityonion.net/telemetry.rst. + +ASSIST_EOF + + echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? " + + read -r input + input=$(echo "${input,,}" | xargs echo -n) + echo "" + if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then + echo "Thank you for helping improve Security Onion!" + else + if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then + echo "Disabled SOC Telemetry." + else + fail "Failed to disable SOC Telemetry; aborting." 
+ fi + fi + echo "" + fi + INSTALLEDVERSION=2.4.70 +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap diff --git a/salt/soc/config.sls b/salt/soc/config.sls index ad0ab1c8d..3e756f977 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -52,6 +52,15 @@ socsaltdir: - mode: 770 - makedirs: True +socanalytics: + file.managed: + - name: /opt/so/conf/soc/analytics.js + - source: salt://soc/files/soc/analytics.js + - user: 939 + - group: 939 + - mode: 600 + - show_changes: False + socconfig: file.managed: - name: /opt/so/conf/soc/soc.json diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 861f6b02c..2ba99cd11 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1,5 +1,6 @@ soc: enabled: False + telemetryEnabled: true config: logFilename: /opt/sensoroni/logs/sensoroni-server.log logLevel: info diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index bbe36e5b7..6cea0c70d 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -8,6 +8,7 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} {% from 'soc/merged.map.jinja' import DOCKER_EXTRA_HOSTS %} +{% from 'soc/merged.map.jinja' import SOCMERGED %} include: - soc.config @@ -31,6 +32,9 @@ so-soc: - /nsm/soc/uploads:/nsm/soc/uploads:rw - /opt/so/log/soc/:/opt/sensoroni/logs/:rw - /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro +{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %} + - /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro +{% endif %} - /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro - /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro - /opt/so/conf/soc/sigma_so_pipeline.yaml:/opt/sensoroni/sigma_so_pipeline.yaml:ro @@ -67,6 +71,7 @@ so-soc: - file: socdatadir - file: soclogdir - file: socconfig + - file: socanalytics - file: socmotd - file: socbanner - file: soccustom diff --git 
a/salt/soc/files/soc/analytics.js b/salt/soc/files/soc/analytics.js new file mode 100644 index 000000000..6a0d72d5d --- /dev/null +++ b/salt/soc/files/soc/analytics.js @@ -0,0 +1,5 @@ +(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': + new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], + j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= + 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); + })(window,document,'script','dataLayer','GTM-TM46SL7T'); diff --git a/salt/soc/files/soc/motd.md b/salt/soc/files/soc/motd.md index d6b0d3d27..005a2be0f 100644 --- a/salt/soc/files/soc/motd.md +++ b/salt/soc/files/soc/motd.md @@ -12,6 +12,10 @@ To see all the latest features and fixes in this version of Security Onion, clic Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonionsolutions.com/hardware/)! +## Premium Support + +Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonionsolutions.com/support/) to assist corporate, educational, and government organizations. + ## Customize This Space Make this area your own by customizing the content in the [Config](/#/config?s=soc.files.soc.motd__md) interface. diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index eae52e31b..eed0113fc 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -2,6 +2,11 @@ soc: enabled: description: You can enable or disable SOC. advanced: True + telemetryEnabled: + title: SOC Telemetry + description: When enabled, SOC provides feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. 
When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. + global: True + helpLink: telemetry.html files: soc: banner__md: diff --git a/setup/so-functions b/setup/so-functions index 0d66a2621..3a0da7bda 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1258,6 +1258,10 @@ soc_pillar() { " server:"\ " srvKey: '$SOCSRVKEY'"\ "" > "$soc_pillar_file" + + if [[ $telemetry -ne 0 ]]; then + echo " telemetryEnabled: false" >> $soc_pillar_file + fi } telegraf_pillar() { diff --git a/setup/so-setup b/setup/so-setup index 2f62dca78..fc13e5b18 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -447,6 +447,7 @@ if ! [[ -f $install_opt_file ]]; then get_redirect # Does the user want to allow access to the UI? collect_so_allow + whiptail_accept_telemetry whiptail_end_settings elif [[ $is_standalone ]]; then waitforstate=true @@ -468,6 +469,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + whiptail_accept_telemetry whiptail_end_settings elif [[ $is_manager ]]; then info "Setting up as node type manager" @@ -488,6 +490,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + whiptail_accept_telemetry whiptail_end_settings elif [[ $is_managersearch ]]; then info "Setting up as node type managersearch" @@ -508,6 +511,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + whiptail_accept_telemetry whiptail_end_settings elif [[ $is_sensor ]]; then info "Setting up as node type sensor" @@ -597,6 +601,7 @@ if ! 
[[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow + whiptail_accept_telemetry whiptail_end_settings elif [[ $is_receiver ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index 904654c9b..95b21ccde 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -144,6 +144,26 @@ whiptail_cancel() { exit 1 } +whiptail_accept_telemetry() { + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + + The Security Onion development team could use your help! Enabling SOC + Telemetry will help the team understand which UI features are being + used and enables informed prioritization of future development. + + Adjust this setting at anytime via the SOC Configuration screen. + + For more information visit https://docs.securityonion.net/telemetry.rst. + + Enable SOC Telemetry to help improve future releases? + EOM + whiptail --title "$whiptail_title" --yesno "$message" 13 75 + telemetry=$? +} + whiptail_check_exitstatus() { case $1 in 1) From c1bf710e463c15428be7d1ec3c44adaec2e7fa15 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 1 Apr 2024 10:32:25 -0400 Subject: [PATCH 280/777] limit col size --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 95b21ccde..b4928eb98 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -156,7 +156,7 @@ whiptail_accept_telemetry() { Adjust this setting at anytime via the SOC Configuration screen. - For more information visit https://docs.securityonion.net/telemetry.rst. + Additional information: https://docs.securityonion.net/telemetry.rst. Enable SOC Telemetry to help improve future releases? 
EOM From 2c68fd6311eb90672ea5d3c515ee496594a451d8 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 1 Apr 2024 10:32:54 -0400 Subject: [PATCH 281/777] limit col size --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index b4928eb98..77e12c63f 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -160,7 +160,7 @@ whiptail_accept_telemetry() { Enable SOC Telemetry to help improve future releases? EOM - whiptail --title "$whiptail_title" --yesno "$message" 13 75 + whiptail --title "$whiptail_title" --yesno "$message" 15 75 telemetry=$? } From ff777560ac88abb527cd6b3ef14a3d12bed9eda6 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 1 Apr 2024 10:35:15 -0400 Subject: [PATCH 282/777] limit col size --- setup/so-whiptail | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/setup/so-whiptail b/setup/so-whiptail index 77e12c63f..5fa5bf343 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -451,6 +451,12 @@ whiptail_end_settings() { done fi + if [[ $telemetry -eq 0 ]]; then + __append_end_msg "SOC Telemetry: enabled" + else + __append_end_msg "SOC Telemetry: disabled" + fi + # ADVANCED if [[ $MANAGERADV == 'ADVANCED' ]]; then __append_end_msg "Advanced Manager Settings:" From f17d8d3369f136615d078f457efe6b52545b3398 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 1 Apr 2024 10:59:44 -0400 Subject: [PATCH 283/777] analytics --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index db5335a7a..30a170ea7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -593,7 +593,7 @@ used and enables informed prioritization of future development. Adjust this setting at anytime via the SOC Configuration screen. -For more information visit https://docs.securityonion.net/telemetry.rst. +Additional information: https://docs.securityonion.net/telemetry.rst. 
ASSIST_EOF From 7f488422b0deea50ba37a953dd0781c1dc9c45ae Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 2 Apr 2024 09:13:27 -0400 Subject: [PATCH 284/777] Add default columns --- salt/soc/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 9ec22b180..8b6bceef0 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2033,6 +2033,7 @@ soc: - so_detection.severity - so_detection.language - so_detection.ruleset + - soc_timestamp queries: - name: "All Detections" query: "_id:*" @@ -2050,6 +2051,8 @@ soc: query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"' - name: "Detection Type - Yara (Strelka)" query: "so_detection.language:yara" + - name: "Security Onion - Grid Detections" + query: "so_detection.ruleset:securityonion-resources" detection: presets: severity: From 505eeea66a1c368e2d2f0b9f1b40dd63eadc1bad Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 2 Apr 2024 09:39:54 -0400 Subject: [PATCH 285/777] Update defaults.yaml --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8b6bceef0..2d5881ffa 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -561,7 +561,7 @@ soc: - process.executable - user.name - event.dataset - ':strelka:file': + ':strelka:': - soc_timestamp - file.name - file.size From f0835586662bb7fd1072a205f1ac1803e6699a8d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 2 Apr 2024 09:42:43 -0400 Subject: [PATCH 286/777] break out into sep func --- salt/manager/tools/sbin/soup | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 30a170ea7..a71775501 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -582,6 +582,11 @@ up_to_2.4.60() { } up_to_2.4.70() { + toggle_telemetry() + INSTALLEDVERSION=2.4.70 +} + 
+toggle_telemetry() { if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then cat << ASSIST_EOF @@ -613,7 +618,6 @@ ASSIST_EOF fi echo "" fi - INSTALLEDVERSION=2.4.70 } determine_elastic_agent_upgrade() { From 6c2437f8ef2f9edadf2d2d774b7b8c717bc8b90e Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 2 Apr 2024 09:55:56 -0400 Subject: [PATCH 287/777] FEATURE: Add Events table columns for event.module playbook #12703 --- salt/soc/defaults.yaml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 2d5881ffa..a78ea88e1 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1200,6 +1200,17 @@ soc: - soc_timestamp - event.dataset - message + ':playbook:': + - soc_timestamp + - rule.name + - event.severity_label + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port + - event_data.process.executable + - event_data.process.pid server: bindAddress: 0.0.0.0:9822 baseUrl: / @@ -1876,11 +1887,13 @@ soc: - soc_timestamp - rule.name - event.severity_label - - event_data.event.module - - event_data.event.category + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port - event_data.process.executable - event_data.process.pid - - event_data.winlog.computer_name queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged From b2b54ccf60724a3ed9ac591c638dd7902fdb17f7 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 2 Apr 2024 10:11:16 -0400 Subject: [PATCH 288/777] FEATURE: Add Events table columns for event.module strelka #12716 --- salt/soc/defaults.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index a78ea88e1..db98b6b2f 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1893,7 +1893,16 @@ soc: - 
event_data.destination.host - event_data.destination.port - event_data.process.executable - - event_data.process.pid + - event_data.process.pid + ':strelka:': + - soc_timestamp + - file.name + - file.size + - hash.md5 + - file.source + - file.mime_type + - log.id.fuid + - event.dataset queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged From 2f03cbf11535b8b33190da15b2695d724df75336 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 2 Apr 2024 10:42:20 -0400 Subject: [PATCH 289/777] FEATURE: Add Events table columns for event.module strelka #12716 --- salt/soc/defaults.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index db98b6b2f..711bba8d6 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -570,6 +570,15 @@ soc: - file.mime_type - log.id.fuid - event.dataset + ':strelka:file': + - soc_timestamp + - file.name + - file.size + - hash.md5 + - file.source + - file.mime_type + - log.id.fuid + - event.dataset ':suricata:': - soc_timestamp - source.ip From 26abe9067154676df31060bec3b3981ca5af0e05 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 12:19:46 -0400 Subject: [PATCH 290/777] Removed duplicate kafka setup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- setup/so-setup | 9 --------- 1 file changed, 9 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index a50fea19d..191b25ef2 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -629,15 +629,6 @@ if ! 
[[ -f $install_opt_file ]]; then set_minion_info whiptail_end_settings - elif [[ $is_kafka ]]; then - info "Setting up as node type Kafka broker" - #check_requirements "kafka" - networking_needful - collect_mngr_hostname - add_mngr_ip_to_hosts - check_manager_connection - set_minion_info - whiptail_end_settings fi if [[ $waitforstate ]]; then From 3b112e20e3129d65a98161d22c998b9cd46dd3af Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 2 Apr 2024 12:32:33 -0400 Subject: [PATCH 291/777] fix syntax error --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a71775501..455fd2256 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -582,7 +582,7 @@ up_to_2.4.60() { } up_to_2.4.70() { - toggle_telemetry() + toggle_telemetry INSTALLEDVERSION=2.4.70 } From 283939b18a23884f5cd3aa175a701cc99556cef4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 2 Apr 2024 15:36:01 -0400 Subject: [PATCH 292/777] Gather metrics from elastic agent to influx --- salt/manager/init.sls | 20 +++++++++++++ .../manager/tools/sbin/so-elasticagent-status | 10 +++++++ salt/telegraf/scripts/agentstatus.sh | 30 +++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 salt/manager/tools/sbin/so-elasticagent-status create mode 100644 salt/telegraf/scripts/agentstatus.sh diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 0ff4fa85a..d979482ef 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -27,6 +27,15 @@ repo_log_dir: - user - group +agents_log_dir: + file.directory: + - name: /opt/so/log/agents + - user: root + - group: root + - recurse: + - user + - group + yara_log_dir: file.directory: - name: /opt/so/log/yarasync @@ -101,6 +110,17 @@ so-repo-sync: - hour: '{{ MANAGERMERGED.reposync.hour }}' - minute: '{{ MANAGERMERGED.reposync.minute }}' +so_fleetagent_status: + cron.present: + - name: 
/usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1 + - identifier: so_fleetagent_status + - user: root + - minute: '*/5' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + socore_own_saltstack: file.directory: - name: /opt/so/saltstack diff --git a/salt/manager/tools/sbin/so-elasticagent-status b/salt/manager/tools/sbin/so-elasticagent-status new file mode 100644 index 000000000..dffd76660 --- /dev/null +++ b/salt/manager/tools/sbin/so-elasticagent-status @@ -0,0 +1,10 @@ +#!/bin/bash + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. /usr/sbin/so-common + +curl -s -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agent_status" | jq . \ No newline at end of file diff --git a/salt/telegraf/scripts/agentstatus.sh b/salt/telegraf/scripts/agentstatus.sh new file mode 100644 index 000000000..1673e7bd2 --- /dev/null +++ b/salt/telegraf/scripts/agentstatus.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + + + +# if this script isn't already running +if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then + + LOGFILE=$(cat /opt/so/log/agents/agentstatus.log) + ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') + ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') + INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') + OFFLINE=$(cat $LOGFILE | grep -wF offline | awk '{print $2}' | tr -d ',') + UPDATING=$(cat $LOGFILE | grep -wF updating | awk '{print $2}' | tr -d ',') + UNENROLLED=$(cat $LOGFILE | grep -wF unenrolled | awk '{print $2}' | tr -d ',') + OTHER=$(cat $LOGFILE | grep -wF other | awk '{print $2}' | tr -d ',') + EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',') + TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',') + ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',') + ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}') + + echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE" + +fi + +exit 0 From 780ad9eb10ebdd20258f0b3daa875167eb6b78b0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 2 Apr 2024 15:50:25 -0400 Subject: [PATCH 293/777] add kafka to manager nodes --- salt/allowed_states.map.jinja | 3 ++- salt/top.sls | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 460e3e9eb..6fa60c2ea 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -101,7 +101,8 @@ 'utility', 'schedule', 'docker_clean', - 'stig' + 'stig', + 'kafka' ], 'so-managersearch': [ 'salt.master', diff --git a/salt/top.sls b/salt/top.sls index b418768ad..289dd462b 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -106,6 +106,7 @@ base: - utility - elasticfleet - stig + - kafka '*_standalone and G@saltversion:{{saltversion}}': - match: compound From 
b6187ab76992f1e37328b58c0d017952706df359 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 2 Apr 2024 15:54:39 -0400 Subject: [PATCH 294/777] Improve wording for Airgap annotation --- salt/global/soc_global.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index a48476214..910babcf4 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -28,7 +28,7 @@ global: description: Used for handling of authentication cookies. global: True airgap: - description: Sets airgap mode. + description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting. 
global: True readonly: True imagerepo: From f7534a0ae3dbc8ec34556571afb0bdbc314d6172 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 2 Apr 2024 16:01:12 -0400 Subject: [PATCH 295/777] make manager download so-kafka container --- salt/common/tools/sbin/so-image-common | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 752ec20e0..faf8d50a9 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -50,6 +50,7 @@ container_list() { "so-idh" "so-idstools" "so-influxdb" + "so-kafka" "so-kibana" "so-kratos" "so-logstash" From 1b49c8540e466f9b9c602b77245dac58cc5609e4 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:32:15 -0400 Subject: [PATCH 296/777] Fix kafka keystore script Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 208 +++++++++--------- .../sbin_jinja/so-kafka-generate-keystore | 2 +- 2 files changed, 109 insertions(+), 101 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 8caaa01cd..ddf2777a1 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -1,101 +1,109 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} - -{% set kafka_ips_logstash = [] %} -{% set kafka_ips_kraft = [] %} -{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} -{% set kafka_ip = GLOBALS.node_ip %} - -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set combined = [] %} -{% for hostname, data in nodes.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname) %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - -{# Create list for kafka <-> logstash/searchnode communcations #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% set kafka_server_list = "','".join(kafka_ips_logstash) %} - -{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} -{% endfor %} -{% set kraft_server_list = "','".join(kafka_ips_kraft) %} - - -include: - - ssl - -kafka_group: - group.present: - - name: kafka - - gid: 960 - -kafka: - user.present: - - uid: 960 - - gid: 960 - -{# Future tools to query kafka directly / show consumer groups -kafka_sbin_tools: - file.recurse: - - name: /usr/sbin - - source: salt://kafka/tools/sbin - - user: 960 - - group: 960 - - file_mode: 755 #} - -kakfa_log_dir: - file.directory: - - name: /opt/so/log/kafka - - user: 960 - - group: 960 - - makedirs: True - -kafka_data_dir: - file.directory: - - name: /nsm/kafka/data - - user: 960 - - group: 960 - - makedirs: True - -{# When docker container is created an added to registry. 
Update so-kafka-generate-keystore script #} -kafka_keystore_script: - cmd.script: - - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore - - tempalte: jinja - - cwd: /opt/so - - defaults: - GLOBALS: {{ GLOBALS }} - -kafka_kraft_server_properties: - file.managed: - - source: salt://kafka/etc/server.properties.jinja - - name: /opt/so/conf/kafka/server.properties - - template: jinja - - defaults: - kafka_nodeid: {{ kafka_nodeid }} - kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} - kafka_ip: {{ kafka_ip }} - - user: 960 - - group: 960 - - makedirs: True - - show_changes: False - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set kafka_ips_logstash = [] %} +{% set kafka_ips_kraft = [] %} +{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} +{% set kafka_ip = GLOBALS.node_ip %} + +{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set combined = [] %} +{% for hostname, data in nodes.items() %} + {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} +{% endfor %} +{% set kraft_controller_quorum_voters = ','.join(combined) %} + +{# Create list for kafka <-> logstash/searchnode communcations #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} +{% endfor %} +{% set kafka_server_list = "','".join(kafka_ips_logstash) %} + +{# Create a list for kraft controller <-> kraft controller communications. 
Used for Kafka metadata management #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} +{% endfor %} +{% set kraft_server_list = "','".join(kafka_ips_kraft) %} + + +include: + - ssl + +kafka_group: + group.present: + - name: kafka + - gid: 960 + +kafka: + user.present: + - uid: 960 + - gid: 960 + +{# Future tools to query kafka directly / show consumer groups +kafka_sbin_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin + - user: 960 + - group: 960 + - file_mode: 755 #} + +kafka_sbin_jinja_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin_jinja + - user: 960 + - group: 960 + - file_mode: 755 + - template: jinja + +kakfa_log_dir: + file.directory: + - name: /opt/so/log/kafka + - user: 960 + - group: 960 + - makedirs: True + +kafka_data_dir: + file.directory: + - name: /nsm/kafka/data + - user: 960 + - group: 960 + - makedirs: True + +kafka_keystore_script: + cmd.script: + - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore + - template: jinja + - cwd: /opt/so + - defaults: + GLOBALS: {{ GLOBALS }} + +kafka_kraft_server_properties: + file.managed: + - source: salt://kafka/etc/server.properties.jinja + - name: /opt/so/conf/kafka/server.properties + - template: jinja + - defaults: + kafka_nodeid: {{ kafka_nodeid }} + kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} + kafka_ip: {{ kafka_ip }} + - user: 960 + - group: 960 + - makedirs: True + - show_changes: False + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore index 69bb6ad87..1809c7a93 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ 
-8,7 +8,7 @@ . /usr/sbin/so-common if [ ! -f /etc/pki/kafka.jks ]; then - docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool so-kafka -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srsstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -alias kafkastore -noprompt + docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks docker rm so-kafka-keystore else From b032eed22a94a80a62e83f4dfe79914523a7024c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:34:06 -0400 Subject: [PATCH 297/777] Update kafka to use manager docker registry Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/common/tools/sbin/so-image-common | 3 +- salt/kafka/enabled.sls | 91 +++++++++++++------------- 2 files changed, 48 insertions(+), 46 deletions(-) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 7900b3c52..d322c8e9b 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -67,7 +67,8 @@ container_list() { "so-strelka-manager" "so-suricata" "so-telegraf" - "so-zeek" + "so-zeek" + "so-kafka" ) else TRUSTED_CONTAINERS=( diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 1bf7dcf8b..31d375e23 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -1,46 +1,47 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license 
agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'docker/docker.map.jinja' import DOCKER %} - -include: - - kafka.sostatus - - kafka.config - - kafka.storage - -so-kafka: - docker_container.running: - - image: so-kafka - - hostname: so-kafka - - name: so-kafka - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - - user: kafka - - port_bindings: - {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} - - {{ BINDING }} - {% endfor %} - - binds: - - /etc/pki/kafka.jks:/etc/pki/kafka.jks - - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - - /nsm/kafka/data/:/nsm/kafka/data/:rw - - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties - -delete_so-kafka_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-kafka$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} + +include: + - kafka.sostatus + - kafka.config + - kafka.storage + +so-kafka: + docker_container.running: + - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} + - hostname: so-kafka + - name: so-kafka + - networks: + - sobridge: + - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} + - user: kafka + - port_bindings: + {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} + - {{ BINDING }} + {% endfor %} + - binds: + - /etc/pki/kafka.jks:/etc/pki/kafka.jks + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /nsm/kafka/data/:/nsm/kafka/data/:rw + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + +delete_so-kafka_so-status.disabled: + file.uncomment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From 643d4831c10b09714021ae97e19e0ea679433b94 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:35:14 -0400 Subject: [PATCH 298/777] CRLF -> LF Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/init.sls | 18 +++++----- salt/kafka/sostatus.sls | 40 ++++++++++----------- salt/manager/tools/sbin/so-kafka-clusterid | 42 +++++++++++----------- 3 files changed, 50 insertions(+), 50 deletions(-) diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 653cd4b88..903c66867 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -1,9 +1,9 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or 
more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{# Create map.jinja to enable / disable kafka from UI #} -{# Temporarily just enable kafka #} -include: - - kafka.enabled +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{# Create map.jinja to enable / disable kafka from UI #} +{# Temporarily just enable kafka #} +include: + - kafka.enabled diff --git a/salt/kafka/sostatus.sls b/salt/kafka/sostatus.sls index 4c7519964..37c868a46 100644 --- a/salt/kafka/sostatus.sls +++ b/salt/kafka/sostatus.sls @@ -1,21 +1,21 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-kafka_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-kafka - - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} + +append_so-kafka_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-kafka + - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 64833a0d2..719973247 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -1,22 +1,22 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -local_salt_dir=/opt/so/saltstack/local - -if [[ -f /usr/sbin/so-common ]]; then - source /usr/sbin/so-common -else - source $(dirname $0)/../../../common/tools/sbin/so-common -fi - -if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls -else - echo 'kafka_cluster_id exists' - salt-call pillar.get secrets +#!/bin/bash + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +local_salt_dir=/opt/so/saltstack/local + +if [[ -f /usr/sbin/so-common ]]; then + source /usr/sbin/so-common +else + source $(dirname $0)/../../../common/tools/sbin/so-common +fi + +if ! 
grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls +else + echo 'kafka_cluster_id exists' + salt-call pillar.get secrets fi \ No newline at end of file From 7f5741c43b9eac3f0409a1eaa7dbe44fc70140d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:36:22 -0400 Subject: [PATCH 299/777] Fix kafka storage setup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/storage.sls | 60 +++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index dc114ef4f..778c054e2 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -1,31 +1,31 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} - -{# Initialize kafka storage if it doesn't already exist. 
Just looking for meta.properties in /nsm/kafka/data #} -{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} -{% else %} -kafka_storage_init: - cmd.run: - - name: | - docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh so-kafka format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/server.properties -kafka_rm_kafkainit: - cmd.run: - - name: | - docker rm so-kafkainit -{% endif %} - - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} + +{# Initialize kafka storage if it doesn't already exist. 
Just looking for meta.properties in /nsm/kafka/data #} +{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} +{% else %} +kafka_storage_init: + cmd.run: + - name: | + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties +kafka_rm_kafkainit: + cmd.run: + - name: | + docker rm so-kafkainit +{% endif %} + + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From 82830c81733a925bf9c6c4748946d53ba1039358 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:37:39 -0400 Subject: [PATCH 300/777] Fix typos and fix error related to elasticsearch saltstate being called from logstash state. Logstash will be removed from kafkanodes in future Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/etc/server.properties.jinja | 244 ++++++++++++------------- salt/logstash/config.sls | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index ad5ac67a9..eb60eda60 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,123 +1,123 @@ -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. 
Setting this puts us in KRaft mode -process.roles=broker,controller - -# The node id associated with this instance's roles -node.id={{ kafka_nodeid }} - -# The connect string for the controller quorum -controller.quorum.voters={{ kraft_controller_quorum_voters }} - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. -# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), -# with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 - -# Name of listener used for communication between brokers. -inter.broker.listener.name=BROKER - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=BROKER://{{ kafka_ip }}:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL - -#SSL configuration -ssl.keystore.location=/etc/pki/kafka.jks -ssl.keystore.pasword=changeit -ssl.keystore.type=JKS -ssl.truststore.location=/etc/pki/java/sos/cacerts -ssl.truststore.password=changeit - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/nsm/kafka/data - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
-offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id={{ kafka_nodeid }} + +# The connect string for the controller quorum +controller.quorum.voters={{ kraft_controller_quorum_voters }} + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. +# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), +# with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=BROKER + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +advertised.listeners=BROKER://{{ kafka_ip }}:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol +# This is required if running in KRaft mode. 
+controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL + +#SSL configuration +ssl.keystore.location=/etc/pki/kafka.jks +ssl.keystore.password=changeit +ssl.keystore.type=JKS +ssl.truststore.location=/etc/pki/java/sos/cacerts +ssl.truststore.password=changeit + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/nsm/kafka/data + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
+offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies log.retention.check.interval.ms=300000 \ No newline at end of file diff --git a/salt/logstash/config.sls b/salt/logstash/config.sls index 8a59c83b7..402d1ef20 100644 --- a/salt/logstash/config.sls +++ b/salt/logstash/config.sls @@ -12,7 +12,7 @@ include: - ssl - {% if GLOBALS.role not in ['so-receiver','so-fleet'] %} + {% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %} - elasticsearch {% endif %} From 4e142e0212ec15994c429cbd1b90c8936e5daca1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 2 Apr 2024 16:47:35 -0400 Subject: [PATCH 301/777] put alphabetical --- salt/common/tools/sbin/so-image-common | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index f1b8b86da..03051cb5f 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -66,7 +66,6 @@ container_list() { "so-suricata" "so-telegraf" "so-zeek" - "so-kafka" ) else TRUSTED_CONTAINERS=( From 1ee2a6d37b41c248b0261cd8f7681a7a517a6dab Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 08:21:30 -0400 Subject: [PATCH 302/777] Improve wording for Airgap annotation --- salt/global/soc_global.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index 910babcf4..daaf28b28 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -28,7 +28,7 @@ global: description: Used for handling of authentication cookies. global: True airgap: - description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. 
While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to chane this setting. + description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting. global: True readonly: True imagerepo: From a8f25150f62a34e2a05138139ce1bdc68a52d85e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 3 Apr 2024 08:21:50 -0400 Subject: [PATCH 303/777] Feature - auto-enabled Sigma rules --- salt/soc/defaults.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8b6bceef0..1d0eb0e38 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1216,6 +1216,10 @@ soc: elastalertengine: allowRegex: '' autoUpdateEnabled: true + autoEnabledSigmaRules: + - core+critical + - securityonion-resources+critical + - securityonion-resources+high communityRulesImportFrequencySeconds: 86400 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert From c1b5ef08917307e677a1c2b555478c8836895790 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 08:44:40 -0400 Subject: [PATCH 304/777] ensure so-yaml.py is updated during soup --- salt/common/soup_scripts.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index fd32b8a28..6e93954f0 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -68,3 +68,10 @@ copy_so-firewall_sbin: - source: 
{{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall - force: True - preserve: True + +copy_so-yaml_sbin: + file.copy: + - name: /usr/sbin/so-yaml.py + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py + - force: True + - preserve: True From 639bf050812e98ce6155465314a6fa9614d615b8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 08:52:26 -0400 Subject: [PATCH 305/777] add so-manager to kafka.nodes pillar --- pillar/kafka/nodes.sls | 4 ++-- salt/kafka/config.sls | 11 +++++------ .../kafka/tools/sbin_jinja/so-kafka-generate-keystore | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index a7d97ac9c..8fdd3ee07 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,4 +1,4 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode', fun='network.ip_addrs', tgt_type='compound') %} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} {% set existing_ids = [] %} @@ -27,4 +27,4 @@ {% endfor %} kafka: - nodes: {{ final_nodes|tojson }} \ No newline at end of file + nodes: {{ final_nodes|tojson }} diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index ddf2777a1..c8d6f66e0 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -63,6 +63,8 @@ kafka_sbin_jinja_tools: - group: 960 - file_mode: 755 - template: jinja + - defaults: + GLOBALS: {{ GLOBALS }} kakfa_log_dir: file.directory: @@ -79,12 +81,9 @@ kafka_data_dir: - makedirs: True kafka_keystore_script: - cmd.script: - - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore - - template: jinja + cmd.run: + - name: /usr/sbin/so-kafka-generate-keystore - cwd: /opt/so - - defaults: - GLOBALS: {{ GLOBALS }} kafka_kraft_server_properties: file.managed: @@ -106,4 +105,4 @@ 
kafka_kraft_server_properties: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore index 1809c7a93..26f188377 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -13,4 +13,4 @@ if [ ! -f /etc/pki/kafka.jks ]; then docker rm so-kafka-keystore else exit 0 -fi \ No newline at end of file +fi From 8e47cc73a5429b5f6c4d106867c81276b19b13c0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 08:54:17 -0400 Subject: [PATCH 306/777] kafka.nodes pillar to lf --- pillar/kafka/nodes.sls | 60 +++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index 8fdd3ee07..b1842834c 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,30 +1,30 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} -{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} - -{% set existing_ids = [] %} -{% for node in pillar_kafkanodes.values() %} - {% if node.get('id') %} - {% do existing_ids.append(node['nodeid']) %} - {% endif %} -{% endfor %} -{% set all_possible_ids = range(1, 256)|list %} - -{% set available_ids = [] %} -{% for id in all_possible_ids %} - {% if id not in existing_ids %} - {% do available_ids.append(id) %} - {% endif %} -{% endfor %} - -{% set final_nodes = pillar_kafkanodes.copy() %} - -{% for minionid, ip in current_kafkanodes.items() %} - {% set hostname = minionid.split('_')[0] %} - {% if hostname not in final_nodes %} - {% set new_id = available_ids.pop(0) %} - {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} - {% endif %} -{% 
endfor %} - -kafka: - nodes: {{ final_nodes|tojson }} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} +{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} + +{% set existing_ids = [] %} +{% for node in pillar_kafkanodes.values() %} + {% if node.get('id') %} + {% do existing_ids.append(node['nodeid']) %} + {% endif %} +{% endfor %} +{% set all_possible_ids = range(1, 256)|list %} + +{% set available_ids = [] %} +{% for id in all_possible_ids %} + {% if id not in existing_ids %} + {% do available_ids.append(id) %} + {% endif %} +{% endfor %} + +{% set final_nodes = pillar_kafkanodes.copy() %} + +{% for minionid, ip in current_kafkanodes.items() %} + {% set hostname = minionid.split('_')[0] %} + {% if hostname not in final_nodes %} + {% set new_id = available_ids.pop(0) %} + {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} + {% endif %} +{% endfor %} + +kafka: + nodes: {{ final_nodes|tojson }} From 0efdcfcb52becf95fe6e505c93f07175ea2785fb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 09:36:02 -0400 Subject: [PATCH 307/777] add agentstatus to telegraf --- salt/telegraf/etc/telegraf.conf | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 1c5801645..9ced1317c 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -202,6 +202,16 @@ insecure_skip_verify = true {%- endif %} +# # Get Agent Stats +{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import'] %} +[[inputs.exec]] + commands = [ + "/scripts/agentstatus.sh" + ] + data_format = "influx" + timeout = "15s" +{%- endif %} + # ## Timeout for HTTP requests to the elastic search server(s) # http_timeout = "5s" # From 0dfde3c9f230f4b76208822cc05d910c0797b9f1 Mon Sep 17 00:00:00 2001 
From: Mike Reeves Date: Wed, 3 Apr 2024 09:40:14 -0400 Subject: [PATCH 308/777] add agentstatus to telegraf --- salt/telegraf/defaults.yaml | 4 ++++ salt/telegraf/etc/telegraf.conf | 10 ---------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml index d3718f0b5..3cf2f731d 100644 --- a/salt/telegraf/defaults.yaml +++ b/salt/telegraf/defaults.yaml @@ -11,6 +11,7 @@ telegraf: quiet: 'false' scripts: eval: + - agentstatus.sh - checkfiles.sh - influxdbsize.sh - lasthighstate.sh @@ -23,6 +24,7 @@ telegraf: - zeekcaptureloss.sh - zeekloss.sh standalone: + - agentstatus.sh - checkfiles.sh - eps.sh - influxdbsize.sh @@ -38,6 +40,7 @@ telegraf: - zeekloss.sh - features.sh manager: + - agentstatus.sh - influxdbsize.sh - lasthighstate.sh - os.sh @@ -46,6 +49,7 @@ telegraf: - sostatus.sh - features.sh managersearch: + - agentstatus.sh - eps.sh - influxdbsize.sh - lasthighstate.sh diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 9ced1317c..1c5801645 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -202,16 +202,6 @@ insecure_skip_verify = true {%- endif %} -# # Get Agent Stats -{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import'] %} -[[inputs.exec]] - commands = [ - "/scripts/agentstatus.sh" - ] - data_format = "influx" - timeout = "15s" -{%- endif %} - # ## Timeout for HTTP requests to the elastic search server(s) # http_timeout = "5s" # From 513273c8c34868355fc9ca12d34c3dbfe74c8c8c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 09:43:55 -0400 Subject: [PATCH 309/777] add agentstatus to telegraf --- salt/telegraf/enabled.sls | 1 + salt/telegraf/scripts/agentstatus.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/telegraf/enabled.sls b/salt/telegraf/enabled.sls index b1fa0c247..8e3bc9fbe 100644 --- a/salt/telegraf/enabled.sls +++ 
b/salt/telegraf/enabled.sls @@ -56,6 +56,7 @@ so-telegraf: - /opt/so/log/raid:/var/log/raid:ro - /opt/so/log/sostatus:/var/log/sostatus:ro - /opt/so/log/salt:/var/log/salt:ro + - /opt/so/log/agents:/var/log/agents:ro {% if DOCKER.containers['so-telegraf'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/telegraf/scripts/agentstatus.sh b/salt/telegraf/scripts/agentstatus.sh index 1673e7bd2..ccba213f6 100644 --- a/salt/telegraf/scripts/agentstatus.sh +++ b/salt/telegraf/scripts/agentstatus.sh @@ -10,7 +10,7 @@ # if this script isn't already running if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - LOGFILE=$(cat /opt/so/log/agents/agentstatus.log) + LOGFILE=$(cat /var/log/agents/agentstatus.log) ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') From 333561236588306871f1d4dd605c816cdabd2a53 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 09:54:16 -0400 Subject: [PATCH 310/777] add agentstatus to telegraf --- salt/telegraf/scripts/agentstatus.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/telegraf/scripts/agentstatus.sh b/salt/telegraf/scripts/agentstatus.sh index ccba213f6..7ba599e8a 100644 --- a/salt/telegraf/scripts/agentstatus.sh +++ b/salt/telegraf/scripts/agentstatus.sh @@ -10,7 +10,7 @@ # if this script isn't already running if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then - LOGFILE=$(cat /var/log/agents/agentstatus.log) + LOGFILE=$(/var/log/agents/agentstatus.log) ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') From 64748b98adf4bdf32a17a2069487d7686c842ea1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 09:56:12 -0400 Subject: [PATCH 311/777] add agentstatus to telegraf --- salt/telegraf/scripts/agentstatus.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/telegraf/scripts/agentstatus.sh b/salt/telegraf/scripts/agentstatus.sh index 7ba599e8a..f57fc5f20 100644 --- a/salt/telegraf/scripts/agentstatus.sh +++ b/salt/telegraf/scripts/agentstatus.sh @@ -10,7 +10,7 @@ # if this script isn't already running if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - LOGFILE=$(/var/log/agents/agentstatus.log) + LOGFILE=/var/log/agents/agentstatus.log ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') From 976ddd39820584fda91023d5147b07aa9c722b66 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 10:06:08 -0400 Subject: [PATCH 312/777] add agentstatus to telegraf --- salt/telegraf/scripts/agentstatus.sh | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/salt/telegraf/scripts/agentstatus.sh b/salt/telegraf/scripts/agentstatus.sh index f57fc5f20..a390552fc 100644 --- a/salt/telegraf/scripts/agentstatus.sh +++ b/salt/telegraf/scripts/agentstatus.sh @@ -11,19 +11,23 @@ if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then LOGFILE=/var/log/agents/agentstatus.log - ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') - ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') - INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') - OFFLINE=$(cat $LOGFILE | grep -wF offline | awk '{print $2}' | tr -d ',') - UPDATING=$(cat $LOGFILE | grep -wF updating | awk '{print $2}' | tr -d ',') - UNENROLLED=$(cat $LOGFILE | grep -wF unenrolled | awk '{print $2}' | tr -d ',') - OTHER=$(cat $LOGFILE | grep -wF other | awk '{print $2}' | tr -d ',') - EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',') - TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',') - ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',') - ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}') - echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE" + # Check to see if the file is there yet so we don't break install verification since there is a 5 minute delay for this file to show up + if [ -f $LOGFILE ]; then + ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',') + ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',') + INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',') + OFFLINE=$(cat $LOGFILE | grep -wF offline | awk '{print $2}' | tr -d ',') + UPDATING=$(cat $LOGFILE | grep -wF updating | awk '{print $2}' | tr -d ',') + UNENROLLED=$(cat $LOGFILE | grep -wF unenrolled | awk '{print $2}' | tr -d ',') + OTHER=$(cat $LOGFILE | grep -wF other | awk '{print $2}' | tr -d ',') + EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',') + TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',') + ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',') + 
ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}') + + echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE" + fi fi From c712529cf6407baf76c7651531e73d30d48c2e5a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 10:21:35 -0400 Subject: [PATCH 313/777] suppress soup update output for cleaner console --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 455fd2256..2191f1ac7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -802,7 +802,7 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null # Verify that soup scripts updated as expected get_soup_script_hashes From db106f8ca1ef7126b15e5945555fe3961796059c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:22:47 -0400 Subject: [PATCH 314/777] listen on 0.0.0.0 for CONTROLLER --- salt/kafka/etc/server.properties.jinja | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index eb60eda60..3ed878766 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -23,7 +23,11 @@ controller.quorum.voters={{ kraft_controller_quorum_voters }} # listeners = listener_name://host_name:port # EXAMPLE: # listeners = PLAINTEXT://your.host.name:9092 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# using 0.0.0.0 eliminates issues 
with binding to 9093 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://0.0.0.0:9093 +#listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + # Name of listener used for communication between brokers. inter.broker.listener.name=BROKER @@ -120,4 +124,4 @@ log.segment.bytes=1073741824 # The interval at which log segments are checked to see if they can be deleted according # to the retention policies -log.retention.check.interval.ms=300000 \ No newline at end of file +log.retention.check.interval.ms=300000 From c3f02a698ebd7d7afb5af09e1caa4a7442a773dc Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:23:36 -0400 Subject: [PATCH 315/777] add kafka nodes as extra hosts for the container --- salt/kafka/enabled.sls | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 31d375e23..507071950 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -23,6 +23,15 @@ so-kafka: - sobridge: - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - user: kafka + - extra_hosts: + {% for node in KAFKANODES %} + - {{ node }}:{{ KAFKANODES[node].ip }} + {% endfor %} + {% if DOCKER.containers['so-kafka'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} - {{ BINDING }} @@ -44,4 +53,4 @@ delete_so-kafka_so-status.disabled: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} From ed6137a76a7fb3de136a9fc9b75c558637b25d34 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:24:10 -0400 Subject: [PATCH 316/777] allow sensor and searchnode to connect to manager kafka ports --- salt/firewall/defaults.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 
bc380a17d..bf3a003f3 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -405,6 +405,7 @@ firewall: - docker_registry - influxdb - sensoroni + - kafka searchnode: portgroups: - redis @@ -418,6 +419,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka heavynode: portgroups: - redis From 18f95e867f798b57d70c8d8cfe329d7b53af6cf7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:24:53 -0400 Subject: [PATCH 317/777] port 9093 for kafka docker --- salt/docker/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index fea6c718f..ff130853a 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -189,7 +189,7 @@ docker: final_octet: 88 port_bindings: - 0.0.0.0:9092:9092 - - 0.0.0.0:2181:2181 + - 0.0.0.0:9093:9093 custom_bind_mounts: [] extra_hosts: [] - extra_env: [] \ No newline at end of file + extra_env: [] From 5f4a0fdfad79d5815704c1c9d6efbb84d5ccc062 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 10:26:48 -0400 Subject: [PATCH 318/777] suppress soup update output for cleaner console --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 2191f1ac7..cd666fe51 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -892,17 +892,17 @@ main() { echo "" require_manager - check_pillar_items + check_pillar_items > /dev/null echo "Checking to see if this is an airgap install." echo "" - check_airgap + check_airgap > /dev/null if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then echo "Missing file argument (-f ) for unattended airgap upgrade." exit 0 fi - set_minionid + set_minionid > /dev/null echo "Found that Security Onion $INSTALLEDVERSION is currently installed." 
echo "" if [[ $is_airgap -eq 0 ]]; then From 0de1f76139d0b7a26619bca9ae41deace48107e9 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 10:26:59 -0400 Subject: [PATCH 319/777] add agent count to reposync --- salt/common/tools/sbin/so-common | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 37adcef99..ebff356e5 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -248,6 +248,11 @@ get_random_value() { head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1 } +get_agent_count() { + + AGENTCOUNT=$(/usr/sbin/so-elasticagent-status | grep -wF active | awk '{print $2}') +} + gpg_rpm_import() { if [[ $is_oracle ]]; then if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then @@ -570,8 +575,9 @@ sync_options() { set_version set_os salt_minion_count + get_agent_count - echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)" + echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)/$AGENTCOUNT" } systemctl_func() { From c4767bfdc8ebc610bd70e68838781796e6f300c4 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 10:28:43 -0400 Subject: [PATCH 320/777] suppress soup update output for cleaner console --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index cd666fe51..efb8d97d8 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -229,7 +229,7 @@ check_local_mods() { # {% endraw %} check_pillar_items() { - local pillar_output=$(salt-call pillar.items --out=json) + local pillar_output=$(salt-call pillar.items -lerror --out=json) cond=$(jq '.local | has("_errors")' <<< "$pillar_output") if [[ "$cond" == "true" ]]; then @@ -892,7 +892,7 @@ main() { echo "" require_manager - check_pillar_items > /dev/null + check_pillar_items echo "Checking to see if this is an airgap 
install." echo "" From ca57c2069136a4f2abac474bd5c2101b2efda79c Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 10:31:24 -0400 Subject: [PATCH 321/777] suppress soup update output for cleaner console --- salt/common/tools/sbin/so-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 37adcef99..e4f2a1e35 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -329,7 +329,7 @@ lookup_salt_value() { local="" fi - salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local} + salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local} } lookup_pillar() { From 105eadf111f1190eab670dad9b8fa58a0b0f2432 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 3 Apr 2024 14:40:41 +0000 Subject: [PATCH 322/777] Add cef --- salt/elasticsearch/defaults.yaml | 44 +++++++++++++++++++++++ salt/elasticsearch/soc_elasticsearch.yaml | 1 + 2 files changed, 45 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index c70b0419a..b00a599d8 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -2402,6 +2402,50 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-cef_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-cef.log-*" + template: + settings: + index: + lifecycle: + name: so-logs-cef.log-logs + number_of_replicas: 0 + composed_of: + - "logs-cef.log@package" + - "logs-cef.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + 
set_priority: + priority: 50 + min_age: 30d so-logs-checkpoint_x_firewall: index_sorting: False index_template: diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index e68d0441b..dd82aad6b 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -366,6 +366,7 @@ elasticsearch: so-logs-azure_x_signinlogs: *indexSettings so-logs-azure_x_springcloudlogs: *indexSettings so-logs-barracuda_x_waf: *indexSettings + so-logs-cef_x_log: *indexSettings so-logs-cisco_asa_x_log: *indexSettings so-logs-cisco_ftd_x_log: *indexSettings so-logs-cisco_ios_x_log: *indexSettings From b863060df12ac99d51a5a37ca4a544dc216c2393 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 11:05:24 -0400 Subject: [PATCH 323/777] kafka broker and listener on 0.0.0.0 --- salt/kafka/etc/server.properties.jinja | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 3ed878766..0032e1b32 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -24,8 +24,8 @@ controller.quorum.voters={{ kraft_controller_quorum_voters }} # EXAMPLE: # listeners = PLAINTEXT://your.host.name:9092 -# using 0.0.0.0 eliminates issues with binding to 9093 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://0.0.0.0:9093 +# using 0.0.0.0 eliminates issues with binding to 9092 and 9093 in initial testing +listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 #listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 From c31e459c2b4081436fe8ecad695905f9b90cd044 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 11:06:00 -0400 Subject: [PATCH 324/777] Change metrics reporting order --- salt/common/tools/sbin/so-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common 
index ebff356e5..ab3d777ad 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -249,7 +249,7 @@ get_random_value() { } get_agent_count() { - + AGENTCOUNT=$(/usr/sbin/so-elasticagent-status | grep -wF active | awk '{print $2}') } @@ -577,7 +577,7 @@ sync_options() { salt_minion_count get_agent_count - echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)/$AGENTCOUNT" + echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)" } systemctl_func() { From 66844af1c2a6c5e5baee0cefd2d60a87907fee82 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 3 Apr 2024 11:54:53 -0400 Subject: [PATCH 325/777] FEATURE: Add dashboard for SOC Login Failures #12738 --- salt/soc/defaults.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 21b107367..e6fbb742a 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1656,9 +1656,12 @@ soc: - name: Overview description: Overview of all events query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port' - - name: SOC Auth - description: SOC (Security Onion Console) authentication logs + - name: SOC Logins + description: SOC (Security Onion Console) logins query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip identity_id | groupby identity_id | groupby http_request.headers.user-agent' + - name: SOC Login Failures + description: SOC (Security Onion Console) login failures + query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby 
http_request.headers.user-agent' - name: Elastalerts description: Elastalert logs query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type' From 8889c974b85e42f47706d6ec37aa2b3cef7935cb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 12:38:59 -0400 Subject: [PATCH 326/777] Change code to allow for non root --- salt/common/tools/sbin/so-common | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index ab3d777ad..9078826e5 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -249,8 +249,11 @@ get_random_value() { } get_agent_count() { - - AGENTCOUNT=$(/usr/sbin/so-elasticagent-status | grep -wF active | awk '{print $2}') + if [ -f /opt/so/log/agents/agentstatus.log ]; then + AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}') + else + AGENTCOUNT=0 + fi } gpg_rpm_import() { From 9078b2bad29b172708083eafbd4870d13a144d7d Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 3 Apr 2024 12:46:29 -0400 Subject: [PATCH 327/777] FEATURE: Add Events table columns for event.module kratos #12740 --- salt/soc/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index e6fbb742a..3e757e431 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -87,12 +87,13 @@ soc: - log.id.uid - network.community_id - event.dataset - ':kratos:audit': + ':kratos:': - soc_timestamp - http_request.headers.x-real-ip - identity_id - http_request.headers.user-agent - event.dataset + - msg '::conn': - soc_timestamp - source.ip From 2b8a051525b7157c005741d56f565f6cd5f36726 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 14:30:09 -0400 Subject: [PATCH 328/777] fix link --- salt/manager/tools/sbin/soup | 6 +++--- setup/so-whiptail | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index efb8d97d8..58e6bd006 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -598,7 +598,7 @@ used and enables informed prioritization of future development. Adjust this setting at anytime via the SOC Configuration screen. -Additional information: https://docs.securityonion.net/telemetry.rst. +Additional information: https://docs.securityonion.net/en/2.4/telemetry.html ASSIST_EOF @@ -896,13 +896,13 @@ main() { echo "Checking to see if this is an airgap install." echo "" - check_airgap > /dev/null + check_airgap if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then echo "Missing file argument (-f ) for unattended airgap upgrade." exit 0 fi - set_minionid > /dev/null + set_minionid echo "Found that Security Onion $INSTALLEDVERSION is currently installed." echo "" if [[ $is_airgap -eq 0 ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index 5fa5bf343..e79f35d2f 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -156,7 +156,7 @@ whiptail_accept_telemetry() { Adjust this setting at anytime via the SOC Configuration screen. - Additional information: https://docs.securityonion.net/telemetry.rst. + Additional information: https://docs.securityonion.net/en/2.4/telemetry.html Enable SOC Telemetry to help improve future releases? EOM From dc27bbb01dd7b3e97980160572b7f217aa0bfa7a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:30:52 -0400 Subject: [PATCH 329/777] Set kafka heap size. 
To be later configured from SOC Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/enabled.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 507071950..723f2be9a 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -23,6 +23,8 @@ so-kafka: - sobridge: - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - user: kafka + - environment: + - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G - extra_hosts: {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} From 13105c4ab31f045acbef2986c59bc4bf431aa2ec Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:34:07 -0400 Subject: [PATCH 330/777] Generate certs for use with elasticfleet kafka output policy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ssl/init.sls | 40 +++++++++------------------------------- 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 2a71cd853..e7b01bcd2 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -702,27 +702,26 @@ kafka_crt: - onchanges: - x509: /etc/pki/kafka.key -# Kafka needs a keystore so just creating a new key / cert for that purpose -etc_kafka_logstash_key: +elasticfleet_kafka_key: x509.private_key_managed: - - name: /etc/pki/kafka-logstash.key + - name: /etc/pki/elasticfleet-kafka.key - keysize: 4096 - backup: True - new: True - {% if salt['file.file_exists']('/etc/pki/kakfa-logstash.key') -%} + {% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%} - prereq: - - x509: etc_kafka_logstash_crt + - x509: elasticfleet_kafka_crt {%- endif %} - retry: attempts: 5 interval: 30 -etc_kafka_logstash_crt: +elasticfleet_kafka_crt: x509.certificate_managed: - - name: /etc/pki/kafka-logstash.crt + - name: /etc/pki/elasticfleet-kafka.crt - ca_server: {{ ca_server }} - signing_policy: elasticfleet - - private_key: 
/etc/pki/kafka-logstash.key + - private_key: /etc/pki/elasticfleet-kafka.key - CN: {{ GLOBALS.hostname }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - days_remaining: 0 @@ -732,10 +731,6 @@ etc_kafka_logstash_crt: - retry: attempts: 5 interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:" - - onchanges: - - x509: etc_kafka_logstash_key kafka_key_perms: file.managed: @@ -756,7 +751,7 @@ kafka_crt_perms: kafka_logstash_cert_perms: file.managed: - replace: False - - name: /etc/pki/kafka-logstash.crt + - name: /etc/pki/elasticfleet-kafka.crt - mode: 640 - user: 960 - group: 939 @@ -764,27 +759,10 @@ kafka_logstash_cert_perms: kafka_logstash_key_perms: file.managed: - replace: False - - name: /etc/pki/kafka-logstash.key + - name: /etc/pki/elasticfleet-kafka.key - mode: 640 - user: 960 - group: 939 - -kafka_logstash_keystore_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.p12 - - mode: 640 - - user: 960 - - group: 939 - -kafka_keystore_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.p12 - - mode: 640 - - user: 960 - - group: 939 - {% endif %} {% else %} From 941a841da0f9f4d5ce8256acb7513ffb09143922 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 14:41:57 -0400 Subject: [PATCH 331/777] fix link --- salt/manager/tools/sbin/soup | 2 +- setup/so-whiptail | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 58e6bd006..bf2e74bf7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -598,7 +598,7 @@ used and enables informed prioritization of future development. Adjust this setting at anytime via the SOC Configuration screen. 
-Additional information: https://docs.securityonion.net/en/2.4/telemetry.html +Read more: https://docs.securityonion.net/en/2.4/telemetry.html ASSIST_EOF diff --git a/setup/so-whiptail b/setup/so-whiptail index e79f35d2f..de1cfec83 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -156,7 +156,7 @@ whiptail_accept_telemetry() { Adjust this setting at anytime via the SOC Configuration screen. - Additional information: https://docs.securityonion.net/en/2.4/telemetry.html + Documentation: https://docs.securityonion.net/en/2.4/telemetry.html Enable SOC Telemetry to help improve future releases? EOM From 8f8896c505d43092c5ef223278f8d3f11ba28363 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 14:45:39 -0400 Subject: [PATCH 332/777] fix link --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index bf2e74bf7..99191a442 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -598,7 +598,7 @@ used and enables informed prioritization of future development. Adjust this setting at anytime via the SOC Configuration screen. -Read more: https://docs.securityonion.net/en/2.4/telemetry.html +Documentation: https://docs.securityonion.net/en/2.4/telemetry.html ASSIST_EOF From 0f50a265cf2ab21dd1eee2ca846d64c0745622c6 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Wed, 3 Apr 2024 13:12:18 -0600 Subject: [PATCH 333/777] Update SOC Config with State File Paths Each detection engine is getting a state file to help manage the timer over restarts. By default, the files will go in soc's config folder inside a fingerprints folder. 
--- salt/soc/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 4ac77229e..0c9d8506e 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1247,6 +1247,7 @@ soc: elastAlertRulesFolder: /opt/sensoroni/elastalert reposFolder: /opt/sensoroni/sigma/repos rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint + stateFilePath: /opt/so/conf/soc/fingerprints/elastalertengine.state rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-resources license: Elastic-2.0 @@ -1307,6 +1308,7 @@ soc: - repo: https://github.com/Security-Onion-Solutions/securityonion-yara license: DRL yaraRulesFolder: /opt/sensoroni/yara/rules + stateFilePath: /opt/so/conf/soc/fingerprints/strelkaengine.state suricataengine: allowRegex: '' autoUpdateEnabled: true @@ -1314,6 +1316,7 @@ soc: communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint + stateFilePath: /opt/so/conf/soc/fingerprints/suricataengine.state client: enableReverseLookup: false docsUrl: /docs/ From 9db9af27aed759eaceeaf0132608c8fc4e2d0c97 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 15:14:50 -0400 Subject: [PATCH 334/777] Attempt to fix 2.3 when main repo changes --- salt/manager/tools/sbin/soup | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a585f877c..9e972b4bc 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -837,7 +837,16 @@ main() { echo "### Preparing soup at $(date) ###" echo "" - + if [[ "$INSTALLEDVERISON" =~ ^2.3.* ]]; then + BRANCH="2.3/main" + cd $UPDATE_DIR + clone_to_tmp + cp $UPDATE_DIR/salt/common/tools/sbin/soup /usr/sbin + cp $UPDATE_DIR/salt/common/tools/sbin/soup /opt/so/saltstack/default/salt/common/tools/sbin + add_common + echo "Please run soup again" + 
exit 0 + fi set_os check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master." From 9c59f42c16a9232abf1e6de8f03c941f617a46bd Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 15:23:09 -0400 Subject: [PATCH 335/777] Attempt to fix 2.3 when main repo changes --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 9e972b4bc..fd5ee8ead 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -841,8 +841,8 @@ main() { BRANCH="2.3/main" cd $UPDATE_DIR clone_to_tmp - cp $UPDATE_DIR/salt/common/tools/sbin/soup /usr/sbin - cp $UPDATE_DIR/salt/common/tools/sbin/soup /opt/so/saltstack/default/salt/common/tools/sbin + cp $UPDATE_DIR/securityonion/salt/common/tools/sbin/soup /usr/sbin + cp $UPDATE_DIR/securityonion/salt/common/tools/sbin/soup /opt/so/saltstack/default/salt/common/tools/sbin add_common echo "Please run soup again" exit 0 From 1b8584d4bbbce5432a3114c8096cd757b3c871d4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 15:36:35 -0400 Subject: [PATCH 336/777] allow manager to manager on kafka ports --- salt/firewall/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index bf3a003f3..e51bf5825 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -370,6 +370,7 @@ firewall: - elastic_agent_update - localrules - sensoroni + - kafka fleet: portgroups: - elasticsearch_rest From 12da7db22c57744a73d48b6028823c98491c5ec0 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 3 Apr 2024 15:38:23 -0400 Subject: [PATCH 337/777] Attempt to fix 2.3 when main repo changes --- salt/manager/tools/sbin/soup | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fd5ee8ead..cbe3d461a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -837,7 +837,8 @@ main() { echo "### Preparing soup at $(date) ###" echo "" - if [[ "$INSTALLEDVERISON" =~ ^2.3.* ]]; then + if [[ "$INSTALLEDVERSION" =~ ^2.3 ]]; then + echo "Actually running the code" BRANCH="2.3/main" cd $UPDATE_DIR clone_to_tmp From f66cca96ce935cd6ff1e95a5516514527018c387 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 3 Apr 2024 16:17:29 -0400 Subject: [PATCH 338/777] YARA casing --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 1d0eb0e38..9bb302057 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2053,7 +2053,7 @@ soc: query: "so_detection.language:sigma" - name: "Detection Type - Sigma - Windows" query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"' - - name: "Detection Type - Yara (Strelka)" + - name: "Detection Type - YARA (Strelka)" query: "so_detection.language:yara" - name: "Security Onion - Grid Detections" query: "so_detection.ruleset:securityonion-resources" From 7c64fc8c05fc8e16592ae4344f49226acc7145bc Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Apr 2024 18:08:42 -0400 Subject: [PATCH 339/777] do not prompt about telemetry on airgap installs --- setup/so-setup | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index fc13e5b18..1c3be22bf 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -447,7 +447,7 @@ if ! [[ -f $install_opt_file ]]; then get_redirect # Does the user want to allow access to the UI? collect_so_allow - whiptail_accept_telemetry + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_standalone ]]; then waitforstate=true @@ -469,7 +469,7 @@ if ! 
[[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow - whiptail_accept_telemetry + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_manager ]]; then info "Setting up as node type manager" @@ -490,7 +490,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow - whiptail_accept_telemetry + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_managersearch ]]; then info "Setting up as node type managersearch" @@ -511,7 +511,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow - whiptail_accept_telemetry + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_sensor ]]; then info "Setting up as node type sensor" @@ -601,7 +601,7 @@ if ! [[ -f $install_opt_file ]]; then collect_webuser_inputs get_redirect collect_so_allow - whiptail_accept_telemetry + [[ ! $is_airgap ]] && whiptail_accept_telemetry whiptail_end_settings elif [[ $is_receiver ]]; then From 1d221a574b9150915de87aa0700dc36a687d5ba9 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 4 Apr 2024 06:48:25 -0400 Subject: [PATCH 340/777] Exclude Elastalert EQL errors --- salt/common/tools/sbin/so-log-check | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index a4b25f0f3..cc5fef85b 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -198,6 +198,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp. 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" fi From a9517e1291b6fc67c5805c066e2092236583b8f2 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 4 Apr 2024 07:49:30 -0400 Subject: [PATCH 341/777] clarify telemetry annotation --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index eed0113fc..c013b66e8 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -4,7 +4,7 @@ soc: advanced: True telemetryEnabled: title: SOC Telemetry - description: When enabled, SOC provides feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. + description: When enabled and not in airgap mode, SOC provides feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. 
global: True helpLink: telemetry.html files: From a7fab380b40ba2df587a97e12f470b66a48b55d8 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 4 Apr 2024 07:51:23 -0400 Subject: [PATCH 342/777] clarify telemetry annotation --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index c013b66e8..a9f0db6c2 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -4,7 +4,7 @@ soc: advanced: True telemetryEnabled: title: SOC Telemetry - description: When enabled and not in airgap mode, SOC provides feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. + description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. 
global: True helpLink: telemetry.html files: From 14c824143bfb821f1b553fa24d68605c15461556 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 08:48:44 -0400 Subject: [PATCH 343/777] Attempt to fix 2.3 when main repo changes --- salt/common/soup_scripts.sls | 7 +++++++ salt/manager/tools/sbin/soup | 11 ----------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index fd32b8a28..99358a1a1 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -1,3 +1,5 @@ +{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %} + {% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} {% if SOC_GLOBAL.global.airgap %} {% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} @@ -68,3 +70,8 @@ copy_so-firewall_sbin: - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall - force: True - preserve: True +{% else %} +fix_old_versions: + cmd.run: + - name: BRANCH=2.3/main soup -y +{% endif %} \ No newline at end of file diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index cbe3d461a..a4343faf4 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -837,17 +837,6 @@ main() { echo "### Preparing soup at $(date) ###" echo "" - if [[ "$INSTALLEDVERSION" =~ ^2.3 ]]; then - echo "Actually running the code" - BRANCH="2.3/main" - cd $UPDATE_DIR - clone_to_tmp - cp $UPDATE_DIR/securityonion/salt/common/tools/sbin/soup /usr/sbin - cp $UPDATE_DIR/securityonion/salt/common/tools/sbin/soup /opt/so/saltstack/default/salt/common/tools/sbin - add_common - echo "Please run soup again" - exit 0 - fi set_os check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master." 
From c2f7f7e3a5dbd562c28691722ef4ff07858a2954 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 4 Apr 2024 08:52:30 -0400 Subject: [PATCH 344/777] Remove dup line --- salt/manager/tools/sbin/soup | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 176c290a8..dba3215d1 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -375,7 +375,6 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 - [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 true } From 4b31632dfc3764773763c2808ccec802520eeee6 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 08:52:37 -0400 Subject: [PATCH 345/777] Attempt to fix 2.3 when main repo changes --- salt/common/soup_scripts.sls | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 99358a1a1..da1f5199b 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -73,5 +73,6 @@ copy_so-firewall_sbin: {% else %} fix_old_versions: cmd.run: - - name: BRANCH=2.3/main soup -y + - env: BRANCH=2.3/main + - name: soup -y {% endif %} \ No newline at end of file From d3f163bf9e1e46c9baa075a8cbaa73479edb9aa4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 08:54:04 -0400 Subject: [PATCH 346/777] Attempt to fix 2.3 when main repo changes --- salt/common/soup_scripts.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index da1f5199b..c8489ac64 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -73,6 +73,6 @@ copy_so-firewall_sbin: {% else %} fix_old_versions: cmd.run: - - env: BRANCH=2.3/main + - env: "BRANCH=2.3/main" - name: soup -y {% endif %} \ No newline at end of file From 
470b0e4bf68415835f2b2c0f5e2b311ba120425c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 08:55:13 -0400 Subject: [PATCH 347/777] Attempt to fix 2.3 when main repo changes --- salt/common/soup_scripts.sls | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index c8489ac64..fe378c38f 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -73,6 +73,7 @@ copy_so-firewall_sbin: {% else %} fix_old_versions: cmd.run: - - env: "BRANCH=2.3/main" + - env: + - BRANCH: "2.3/main" - name: soup -y {% endif %} \ No newline at end of file From 7668fa1396dce0757e370d6e8ef0448a0f18a13c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 09:03:29 -0400 Subject: [PATCH 348/777] Attempt to fix 2.3 when main repo changes --- salt/common/soup_scripts.sls | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index fe378c38f..f36ddf47b 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -71,9 +71,10 @@ copy_so-firewall_sbin: - force: True - preserve: True {% else %} -fix_old_versions: +fix_23_soup_sbin: cmd.run: - - env: - - BRANCH: "2.3/main" - - name: soup -y + - name: curl -s -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup +fix_23_soup_salt: + cmd.run: + - name: curl -s -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup {% endif %} \ No newline at end of file From 5ec3b834fb16d0ac862d40589ebf4bf3d667fbaa Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 4 Apr 2024 09:11:41 -0400 Subject: [PATCH 349/777] FEATURE: Add Events table columns for event.module sigma #12743 --- salt/soc/defaults.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff 
--git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 0c9d8506e..17d4464d4 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1222,6 +1222,17 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid + ':sigma:': + - soc_timestamp + - rule.name + - event.severity_label + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port + - event_data.process.executable + - event_data.process.pid server: bindAddress: 0.0.0.0:9822 baseUrl: / @@ -1915,6 +1926,17 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid + ':sigma:': + - soc_timestamp + - rule.name + - event.severity_label + - event_data.event.dataset + - event_data.source.ip + - event_data.source.port + - event_data.destination.host + - event_data.destination.port + - event_data.process.executable + - event_data.process.pid ':strelka:': - soc_timestamp - file.name From c4ebe25bab42761482c63461344cee10ae35f703 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 09:18:37 -0400 Subject: [PATCH 350/777] Attempt to fix 2.3 when main repo changes --- salt/common/soup_scripts.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index f36ddf47b..e86edfad1 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -73,8 +73,8 @@ copy_so-firewall_sbin: {% else %} fix_23_soup_sbin: cmd.run: - - name: curl -s -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup + - name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup fix_23_soup_salt: cmd.run: - - name: curl -s -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup 
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup + - name: curl -s -f -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup {% endif %} \ No newline at end of file From 784ec54795008dd2f37aaedee364fc946e3de586 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 4 Apr 2024 09:24:17 -0400 Subject: [PATCH 351/777] 2.3 updates --- salt/common/soup_scripts.sls | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index d0f8589a0..90ee059a4 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -1,11 +1,11 @@ {% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %} -{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} -{% if SOC_GLOBAL.global.airgap %} -{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} -{% else %} -{% set UPDATE_DIR='/tmp/sogh/securityonion' %} -{% endif %} +{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} +{% if SOC_GLOBAL.global.airgap %} +{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} +{% else %} +{% set UPDATE_DIR='/tmp/sogh/securityonion' %} +{% endif %} remove_common_soup: file.absent: From d8ac3f1292b0ffa2aaec798e4dbb9d19eb6f29dd Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 4 Apr 2024 09:30:05 -0400 Subject: [PATCH 352/777] FEATURE: Add dashboards specific to Elastic Agent #12746 --- salt/soc/defaults.yaml | 48 ++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 17d4464d4..7c5c5b044 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1678,18 +1678,36 @@ soc: - name: SOC Login Failures description: SOC (Security Onion Console) login failures query: 
'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent' - - name: Elastalerts - description: Elastalert logs - query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type' - name: Alerts description: Overview of all alerts query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination_geo.organization_name' - name: NIDS Alerts description: NIDS (Network Intrusion Detection System) alerts query: 'event.category:network AND tags:alert | groupby rule.category | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby destination_geo.organization_name' - - name: Sysmon Overview - description: Overview of all Sysmon data types - query: 'event.dataset:windows.sysmon_operational | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.category event.action | groupby dns.question.name | groupby process.executable | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: Elastic Agent Overview + description: Overview of all events from Elastic Agents + query: 'event.module:endpoint | groupby event.dataset | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name' + - name: Elastic Agent API Events + description: API (Application Programming Interface) events from Elastic Agents + query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name 
user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby process.Ext.api.name' + - name: Elastic Agent File Events + description: File events from Elastic Agents + query: 'event.dataset:endpoint.events.file | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby file.path' + - name: Elastic Agent Library Events + description: Library events from Elastic Agents + query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' + - name: Elastic Agent Network Events + description: Network events from Elastic Agents + query: 'event.dataset:endpoint.events.network | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: Elastic Agent Process Events + description: Process events from Elastic Agents + query: 'event.dataset:endpoint.events.process | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.parent.name | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.action | groupby process.working_directory' + - name: Elastic Agent Registry Events + description: Registry events from Elastic Agents + query: 'event.dataset:endpoint.events.registry | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby registry.path' + - name: Elastic Agent 
Security Events + description: Security events from Elastic Agents + query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.executable | groupby process.executable | groupby event.action | groupby event.outcome' - name: Host Overview description: Overview of all host data types query: '((event.category:registry OR event.category:host OR event.category:process OR event.category:driver OR event.category:configuration) OR (event.category:file AND _exists_:process.executable) OR (event.category:network AND _exists_:host.name)) | groupby event.dataset* event.category* event.action* | groupby event.type | groupby -sankey event.type host.name | groupby host.name | groupby user.name | groupby file.name | groupby process.executable' @@ -1708,24 +1726,18 @@ soc: - name: Host Network & Process Mappings description: Network activity mapped to originating processes query: 'event.category: network AND _exists_:process.executable | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.dataset* event.type* event.action* | groupby dns.question.name | groupby process.executable | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' - - name: Host API Events - description: API (Application Programming Interface) events from endpoints - query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby process.Ext.api.name' - - name: Host Library Events - description: Library events from endpoints - query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby 
dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name' - - name: Host Security Events - description: Security events from endpoints - query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.executable | groupby process.executable | groupby event.action | groupby event.outcome' + - name: Sysmon Overview + description: Overview of all Sysmon data types + query: 'event.dataset:windows.sysmon_operational | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.category event.action | groupby dns.question.name | groupby process.executable | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Strelka description: Strelka file analysis query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby -sankey file.source file.name | groupby file.name' - name: Zeek Notice description: Zeek notice logs query: 'event.dataset:zeek.notice | groupby notice.note | groupby -sankey notice.note source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby notice.message | groupby notice.sub_message | groupby source_geo.organization_name | groupby destination_geo.organization_name' - - name: Connections and Metadata with community_id - description: Network connections that include community_id - query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name 
rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + - name: Connections and Metadata with Community ID + description: Network connections that include network.community_id + query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby source_geo.organization_name | groupby source.geo.country_name | groupby destination_geo.organization_name | groupby destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' - name: Connections seen by Zeek or Suricata description: Network connections logged by Zeek or Suricata query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui' From 6046848ee70d81aead52d89077decb2f5aecd826 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 4 Apr 2024 10:25:32 -0400 Subject: [PATCH 353/777] skip telemetry summary in airgap mode --- setup/so-whiptail | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index de1cfec83..90bbaf397 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -451,10 
+451,12 @@ whiptail_end_settings() { done fi - if [[ $telemetry -eq 0 ]]; then - __append_end_msg "SOC Telemetry: enabled" - else - __append_end_msg "SOC Telemetry: disabled" + if [[ ! $is_airgap ]]; then + if [[ $telemetry -eq 0 ]]; then + __append_end_msg "SOC Telemetry: enabled" + else + __append_end_msg "SOC Telemetry: disabled" + fi fi # ADVANCED From 49d5fa95a2953638eb75da5dc39e8ad460012041 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 4 Apr 2024 11:26:44 -0400 Subject: [PATCH 354/777] Detections tweaks --- salt/soc/defaults.yaml | 4 ++-- salt/soc/merged.map.jinja | 5 +++++ salt/soc/soc_soc.yaml | 5 +++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 9bb302057..603fc5910 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2049,9 +2049,9 @@ soc: query: "so_detection.isEnabled:false" - name: "Detection Type - Suricata (NIDS)" query: "so_detection.language:suricata" - - name: "Detection Type - Sigma - All" + - name: "Detection Type - Sigma (Elastalert) - All" query: "so_detection.language:sigma" - - name: "Detection Type - Sigma - Windows" + - name: "Detection Type - Sigma (Elastalert) - Windows" query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"' - name: "Detection Type - YARA (Strelka)" query: "so_detection.language:yara" diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index b2362a20e..4cd08a218 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -30,6 +30,11 @@ {# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #} {% do SOCMERGED.config.server.modules.pop('cases') %} +{# do not automatically enable Sigma rules if install is Eval or Import #} +{% if grains['role'] in ['so-eval', 'so-import'] %} + {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': ""}) %} +{% endif %} + {# remove these 
modules if detections is disabled #} {% if not SOCMERGED.config.server.client.detectionsEnabled %} {% do SOCMERGED.config.server.modules.pop('elastalertengine') %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index eae52e31b..ec3f68f3f 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -83,6 +83,11 @@ soc: global: True advanced: True helpLink: sigma.html + autoEnabledSigmaRules: + description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical' + global: True + advanced: True + helpLink: sigma.html denyRegex: description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.' global: True From 7a6b72ebac68f899af097eb5b575dc88e291977b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 4 Apr 2024 15:46:11 -0400 Subject: [PATCH 355/777] add so-kafka to manager for firewall --- salt/firewall/containers.map.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 566f55a40..7efb9abab 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -27,6 +27,7 @@ 'so-elastic-fleet', 'so-elastic-fleet-package-registry', 'so-influxdb', + 'so-kafka', 'so-kibana', 'so-kratos', 'so-logstash', From 4c5b42b898c062ff76dbbdd5eb24d81d3fb315e9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 4 Apr 2024 15:47:01 -0400 Subject: [PATCH 356/777] restart container on server config changes --- salt/kafka/enabled.sls | 2 ++ salt/ssl/init.sls | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 723f2be9a..49a0a9bbd 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -43,6 +43,8 @@ so-kafka: - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - /nsm/kafka/data/:/nsm/kafka/data/:rw - 
/opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + - watch: + - file: kafka_kraft_server_properties delete_so-kafka_so-status.disabled: file.uncomment: diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index e7b01bcd2..3b5ebb2b0 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -704,7 +704,7 @@ kafka_crt: elasticfleet_kafka_key: x509.private_key_managed: - - name: /etc/pki/elasticfleet-kafka.keyn + - name: /etc/pki/elasticfleet-kafka.key - keysize: 4096 - backup: True - new: True From 40b08d737c9025c8c73ba580d1078b474342ee42 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:16:53 -0400 Subject: [PATCH 357/777] Generate kafka keystore on changes to kafka.key Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 15 ++++++++++++--- .../tools/sbin_jinja/so-kafka-generate-keystore | 11 ++++------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c8d6f66e0..dedc68fe8 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -80,10 +80,19 @@ kafka_data_dir: - group: 960 - makedirs: True -kafka_keystore_script: +kafka_generate_keystore: cmd.run: - - name: /usr/sbin/so-kafka-generate-keystore - - cwd: /opt/so + - name: "/usr/sbin/so-kafka-generate-keystore" + - onchanges: + - x509: /etc/pki/kafka.key + +kafka_keystore_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.jks + - mode: 640 + - user: 960 + - group: 939 kafka_kraft_server_properties: file.managed: diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore index 26f188377..8ae9d6db2 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -7,10 +7,7 @@ . /usr/sbin/so-common -if [ ! 
-f /etc/pki/kafka.jks ]; then - docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt - docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks - docker rm so-kafka-keystore -else - exit 0 -fi +# Generate a new keystore +docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt +docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks +docker rm so-kafka-keystore \ No newline at end of file From 436cbc1f0615228ee75bf674ca3e97e0106dcfe5 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:21:29 -0400 Subject: [PATCH 358/777] Add kafka signing_policy for client/server auth. 
Add kafka-client cert on manager so manager can interact with kafka using its own cert Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ca/files/signing_policies.conf | 14 ++++++ salt/ssl/init.sls | 77 +++++++++++++++++++++++++++-- 2 files changed, 86 insertions(+), 5 deletions(-) diff --git a/salt/ca/files/signing_policies.conf b/salt/ca/files/signing_policies.conf index 6f1b1f172..7f9c68750 100644 --- a/salt/ca/files/signing_policies.conf +++ b/salt/ca/files/signing_policies.conf @@ -70,3 +70,17 @@ x509_signing_policies: - authorityKeyIdentifier: keyid,issuer:always - days_valid: 820 - copypath: /etc/pki/issued_certs/ + kafka: + - minions: '*' + - signing_private_key: /etc/pki/ca.key + - signing_cert: /etc/pki/ca.crt + - C: US + - ST: Utah + - L: Salt Lake City + - basicConstraints: "critical CA:false" + - keyUsage: "digitalSignature, keyEncipherment" + - subjectKeyIdentifier: hash + - authorityKeyIdentifier: keyid,issuer:always + - extendedKeyUsage: "serverAuth, clientAuth" + - days_valid: 820 + - copypath: /etc/pki/issued_certs/ diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index e7b01bcd2..a99b030ff 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -687,7 +687,7 @@ kafka_crt: - name: /etc/pki/kafka.crt - ca_server: {{ ca_server }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - - signing_policy: elasticfleet + - signing_policy: kafka - private_key: /etc/pki/kafka.key - CN: {{ GLOBALS.hostname }} - days_remaining: 0 @@ -704,7 +704,7 @@ kafka_crt: elasticfleet_kafka_key: x509.private_key_managed: - - name: /etc/pki/elasticfleet-kafka.keyn + - name: /etc/pki/elasticfleet-kafka.key - keysize: 4096 - backup: True - new: True @@ -720,7 +720,7 @@ elasticfleet_kafka_crt: x509.certificate_managed: - name: /etc/pki/elasticfleet-kafka.crt - ca_server: {{ ca_server }} - - signing_policy: elasticfleet + - signing_policy: kafka - private_key: /etc/pki/elasticfleet-kafka.key - CN: {{ GLOBALS.hostname }} - 
subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} @@ -731,7 +731,58 @@ elasticfleet_kafka_crt: - retry: attempts: 5 interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt" + - onchanges: + - x509: elasticfleet_kafka_key +{% if grains['role'] in ['so-manager'] %} +kafka_client_key: + x509.private_key_managed: + - name: /etc/pki/kafka-client.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%} + - prereq: + - x509: /etc/pki/kafka-client.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_client_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-client.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-client.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + +kafka_client_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-client.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_client_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-client.crt + - mode: 640 + - user: 960 + - group: 939 +{% endif %} kafka_key_perms: file.managed: - replace: False @@ -748,7 +799,23 @@ kafka_crt_perms: - user: 960 - group: 939 -kafka_logstash_cert_perms: +kafka_pkcs8_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p8 + - mode: 640 + - user: 960 + - group: 939 + +kafka_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p12 + - mode: 640 + - user: 960 + - group: 939 + +elasticfleet_kafka_cert_perms: file.managed: - replace: False - name: /etc/pki/elasticfleet-kafka.crt @@ -756,7 +823,7 @@ kafka_logstash_cert_perms: - user: 960 - group: 939 -kafka_logstash_key_perms: 
+elasticfleet_kafka_key_perms: file.managed: - replace: False - name: /etc/pki/elasticfleet-kafka.key From 735cfb4c29d5ec3e96ab2a81edb024af00465b0b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:45:58 -0400 Subject: [PATCH 359/777] Autogenerate kafka topics when a message it sent to non-existing topic Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/etc/server.properties.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 0032e1b32..486feb214 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -28,6 +28,7 @@ controller.quorum.voters={{ kraft_controller_quorum_voters }} listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 #listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 +auto.create.topics.enable=true # Name of listener used for communication between brokers. 
inter.broker.listener.name=BROKER From ca807bd6bd415b4def623a2e58ad710ac38f424b Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 4 Apr 2024 16:58:39 -0400 Subject: [PATCH 360/777] Use list not string --- salt/soc/merged.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 4cd08a218..ae68dc01f 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -32,7 +32,7 @@ {# do not automatically enable Sigma rules if install is Eval or Import #} {% if grains['role'] in ['so-eval', 'so-import'] %} - {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': ""}) %} + {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': []}) %} {% endif %} {# remove these modules if detections is disabled #} From 21f86be8ee4cbc5d1329f3bed816eba5b794248b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 5 Apr 2024 08:03:42 -0400 Subject: [PATCH 361/777] Update so-log-check --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index cc5fef85b..d54c60168 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -235,6 +235,7 @@ exclude_log "curator.log" # ignore since Curator has been removed exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk +exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From 433309ef1a901591b4b6b1f60009cd25db5d6272 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 5 Apr 2024 09:35:12 
-0400 Subject: [PATCH 362/777] Generate kafka cluster id if it doesn't exist Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/storage.sls | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index 778c054e2..e99455e3d 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -6,7 +6,13 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} +{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id', default=None) %} + +{% if kafka_cluster_id is none %} +generate_kafka_cluster_id: + cmd.run: + - name: /usr/sbin/so-kafka-clusterid +{% endif %} {# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #} {% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} From 00cea6fb80f06248746cb709fd26b16590ee22ad Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 5 Apr 2024 11:22:47 -0600 Subject: [PATCH 363/777] Detection Author as a Keyword instead of Text With Quick Actions added to Detections, as many fields should be usable as possible. 
--- .../templates/component/so/detection-mappings.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json index 9b68421e7..105a0ead2 100644 --- a/salt/elasticsearch/templates/component/so/detection-mappings.json +++ b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -30,7 +30,8 @@ "type": "keyword" }, "author": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "description": { "type": "text" From 721e04f793701e635c957b5f0f23f189a2083cd8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 5 Apr 2024 13:37:14 -0400 Subject: [PATCH 364/777] initial logstash input from kafka over ssl Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/enabled.sls | 2 - .../config/so/0800_input_kafka.conf.jinja | 55 +++++++++-------- salt/ssl/init.sls | 59 +++++++++++++++++++ 3 files changed, 89 insertions(+), 27 deletions(-) diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index 798b1984a..fcc2ec190 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -78,8 +78,6 @@ so-logstash: {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - {% endif %} - {% if GLOBALS.role in ['so-kafkanode'] %} - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro {% endif %} {% if GLOBALS.role == 'so-eval' %} diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index c1429319a..957f7da19 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ 
b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,26 +1,31 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} -{% set broker_ips = [] %} -{% for node, node_data in kafka_brokers.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} -{% endfor %} - -{% set bootstrap_servers = "','".join(broker_ips) %} - - -#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes -input { - kafka { - codec => json - #Can ingest multiple topics. Set to a value from SOC UI? - topics => ['logstash-topic',] - group_id => 'searchnodes' - security_protocol => 'SSL' - bootstrap_servers => {{ bootstrap_servers }} - ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' - ssl_keystore_password => '' - ssl_keystore_type => 'PKCS12' - ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' - # Set password as a pillar to avoid bad optics? This is default truststore for grid - ssl_truststore_password => 'changeit' - } +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %} +{% set broker_ips = [] %} +{% for node, node_data in kafka_brokers.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} + +{# For testing kafka stuff from manager not dedicated kafkanodes #} +{% for node, node_data in kafka_on_mngr.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} +{% set bootstrap_servers = "','".join(broker_ips) %} + + +#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes +input { + kafka { + codec => json + #Can ingest multiple topics. Set to a value from SOC UI? 
+ topics => ['ea-logs'] + group_id => 'searchnodes' + security_protocol => 'SSL' + bootstrap_servers => '{{ bootstrap_servers }}' + ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' + ssl_keystore_password => 'changeit' + ssl_keystore_type => 'PKCS12' + ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' + # Set password as a pillar to avoid bad optics? This is default truststore for grid + ssl_truststore_password => 'changeit' + } } \ No newline at end of file diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index a99b030ff..90f9cc64f 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -736,6 +736,40 @@ elasticfleet_kafka_crt: - onchanges: - x509: elasticfleet_kafka_key +kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} + - prereq: + - x509: /etc/pki/kafka-logstash.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka-logstash.key + {% if grains['role'] in ['so-manager'] %} kafka_client_key: x509.private_key_managed: @@ -783,6 +817,7 @@ kafka_client_crt_perms: - user: 960 - group: 939 {% endif %} + kafka_key_perms: file.managed: - replace: False @@ -799,6 +834,30 @@ kafka_crt_perms: - user: 960 - group: 939 +kafka_logstash_key_perms: + 
file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 960 + - group: 931 + kafka_pkcs8_perms: file.managed: - replace: False From 65274e89d7c8741fe63536e030483c2e2af21665 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:38:00 -0400 Subject: [PATCH 365/777] Add client_id to logstash pipeline. To identify which searchnode is pulling messages Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 957f7da19..0260b774e 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -19,6 +19,7 @@ input { #Can ingest multiple topics. Set to a value from SOC UI? 
topics => ['ea-logs'] group_id => 'searchnodes' + client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' bootstrap_servers => '{{ bootstrap_servers }}' ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' From 376efab40ca344fc0fb201d30c3c067b48d22363 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 8 Apr 2024 14:01:38 -0400 Subject: [PATCH 366/777] Ship Defender logs --- .../endpoints-initial/windows-defender.json | 29 +++++++++++++++++++ .../grid-nodes_general/system-grid-nodes.json | 6 ++++ .../grid-nodes_heavy/system-grid-nodes.json | 6 ++++ .../files/ingest/.fleet_final_pipeline-1 | 1 + salt/soc/files/soc/sigma_so_pipeline.yaml | 9 ++++++ 5 files changed, 51 insertions(+) create mode 100644 salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json diff --git a/salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json b/salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json new file mode 100644 index 000000000..ac4394e62 --- /dev/null +++ b/salt/elasticfleet/files/integrations/endpoints-initial/windows-defender.json @@ -0,0 +1,29 @@ +{ + "package": { + "name": "winlog", + "version": "" + }, + "name": "windows-defender", + "namespace": "default", + "description": "Windows Defender - Operational logs", + "policy_id": "endpoints-initial", + "inputs": { + "winlogs-winlog": { + "enabled": true, + "streams": { + "winlog.winlog": { + "enabled": true, + "vars": { + "channel": "Microsoft-Windows-Windows Defender/Operational", + "data_stream.dataset": "winlog.winlog", + "preserve_original_event": false, + "providers": [], + "ignore_older": "72h", + "language": 0, + "tags": [] } + } + } + } + }, + "force": true +} diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json b/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json index 8e6bf7958..98204e894 100644 --- 
a/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/system-grid-nodes.json @@ -16,6 +16,9 @@ "paths": [ "/var/log/auth.log*", "/var/log/secure*" + ], + "tags": [ + "so-grid-node" ] } }, @@ -25,6 +28,9 @@ "paths": [ "/var/log/messages*", "/var/log/syslog*" + ], + "tags": [ + "so-grid-node" ] } } diff --git a/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json b/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json index 6c42086bc..42918cc97 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_heavy/system-grid-nodes.json @@ -16,6 +16,9 @@ "paths": [ "/var/log/auth.log*", "/var/log/secure*" + ], + "tags": [ + "so-grid-node" ] } }, @@ -25,6 +28,9 @@ "paths": [ "/var/log/messages*", "/var/log/syslog*" + ], + "tags": [ + "so-grid-node" ] } } diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 18d078244..89216077a 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -83,6 +83,7 @@ { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, + { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", 
"dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index d227c3f01..312d07965 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -79,3 +79,12 @@ transformations: - type: logsource product: windows category: driver_load + - id: linux_security_add-fields + type: add_condition + conditions: + event.module: 'system' + event.dataset: 'system.auth' + rule_conditions: + - type: logsource + product: linux + service: auth From d67ebabc951cfaf226b176f6eb458ec2b4c35127 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 8 Apr 2024 16:38:03 -0400 Subject: [PATCH 367/777] Remove logstash output to kafka pipeline. Add additional topics for searchnodes to ingest and add partition/offset info to event Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../config/so/0800_input_kafka.conf.jinja | 15 ++++++++----- .../config/so/0899_output_kafka.conf.jinja | 22 ------------------- 2 files changed, 10 insertions(+), 27 deletions(-) delete mode 100644 salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 0260b774e..1391ce983 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -11,13 +11,10 @@ {% endfor %} {% set bootstrap_servers = "','".join(broker_ips) %} - -#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes input { kafka { codec => json - #Can ingest multiple topics. Set to a value from SOC UI? 
- topics => ['ea-logs'] + topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs'] group_id => 'searchnodes' client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' @@ -26,7 +23,15 @@ input { ssl_keystore_password => 'changeit' ssl_keystore_type => 'PKCS12' ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' - # Set password as a pillar to avoid bad optics? This is default truststore for grid ssl_truststore_password => 'changeit' + decorate_events => true + tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ] } +} +filter { + if ![metadata] { + mutate { + rename => { "@metadata" => "metadata" } + } + } } \ No newline at end of file diff --git a/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja deleted file mode 100644 index ff9a6f6ee..000000000 --- a/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja +++ /dev/null @@ -1,22 +0,0 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} -{% set broker_ips = [] %} -{% for node, node_data in kafka_brokers.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} -{% endfor %} - -{% set bootstrap_servers = "','".join(broker_ips) %} - -#Run on kafka broker logstash writes to topic 'logstash-topic' -output { - kafka { - codec => json - topic_id => 'logstash-topic' - bootstrap_servers => '{{ bootstrap_servers }}' - security_protocol => 'SSL' - ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' - ssl_keystore_password => '' - ssl_keystore_type => 'PKCS12' - ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' - ssl_truststore_password => 'changeit' - } -} \ No newline at end of file From 6217a7b9a94a972ecab3e76a436cadf76ce3f4ab Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 09:27:21 -0400 Subject: [PATCH 368/777] add defaults and jijafy kafka config --- 
salt/kafka/config.map.jinja | 8 ++ salt/kafka/config.sls | 12 +-- salt/kafka/defaults.yaml | 39 ++++++++ salt/kafka/enabled.sls | 17 ++-- salt/kafka/etc/client.properties.jinja | 2 + salt/kafka/etc/server.properties.jinja | 130 +------------------------ salt/kafka/map.jinja | 15 +++ 7 files changed, 81 insertions(+), 142 deletions(-) create mode 100644 salt/kafka/config.map.jinja create mode 100644 salt/kafka/defaults.yaml create mode 100644 salt/kafka/etc/client.properties.jinja create mode 100644 salt/kafka/map.jinja diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja new file mode 100644 index 000000000..ab43d84a9 --- /dev/null +++ b/salt/kafka/config.map.jinja @@ -0,0 +1,8 @@ +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'kafka/map.jinja' import KAFKAMERGED %} + +{% set KAFKACONFIG = {} %} +{% for k, v in KAFKAMERGED.config.keys() %} +{% do KAFKACONFIG.update({k | replace("_x_", "."): v}) %} +{% endfor %} + diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index dedc68fe8..523681ba0 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -94,19 +94,17 @@ kafka_keystore_perms: - user: 960 - group: 939 -kafka_kraft_server_properties: +{% for sc in ['server', 'client'] %} +kafka_kraft_{{type}}_properties: file.managed: - - source: salt://kafka/etc/server.properties.jinja - - name: /opt/so/conf/kafka/server.properties + - source: salt://kafka/etc/{{sc}}.properties.jinja + - name: /opt/so/conf/kafka/{{sc}}.properties - template: jinja - - defaults: - kafka_nodeid: {{ kafka_nodeid }} - kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} - kafka_ip: {{ kafka_ip }} - user: 960 - group: 960 - makedirs: True - show_changes: False +{% endfor %} {% else %} diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml new file mode 100644 index 000000000..7828f0536 --- /dev/null +++ b/salt/kafka/defaults.yaml @@ -0,0 +1,39 @@ +kafka: + enabled: False + config: + server: + advertised_x_listeners: 
BROKER://10.66.166.231:9092 + auto_x_create_x_topics_x_enable: true + controller_x_listener_x_names: CONTROLLER + controller_x_quorum_x_voters: + inter_x_broker_x_listener_x_name: BROKER + listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 + listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL + log_x_dirs: /nsm/kafka/data + log_x_retention_x_check_x_interval_x_ms: 300000 + log_x_retention_x_hours: 168 + log_x_segment_x_bytes: 1073741824 + node_x_id: + num_x_io_x_threads: 8 + num_x_network_x_threads: 3 + num_x_partitions: 1 + num_x_recovery_x_threads_x_per_x_data_x_dir: 1 + offsets_x_topic_x_replication_x_factor: 1 + process_x_roles: broker,controller + socket_x_receive_x_buffer_x_bytes: 102400 + socket_x_request_x_max_x_bytes: 104857600 + socket_x_send_x_buffer_x_bytes: 102400 + ssl_x_keystore_x_location: /etc/pki/kafka.jks + ssl_x_keystore_x_password: changeit + ssl_x_keystore_x_type: JKS + ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts + ssl_x_truststore_x_password: changeit + transaction_x_state_x_log_x_min_x_isr: 1 + transaction_x_state_x_log_x_replication_x_factor: 1 + client: + security_x_protocol: SSL + ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts + ssl_x_truststore_x_password: changeit + ssl_x_keystore_x_location: /etc/pki/kafka.jks + ssl_x_keystore_x_type: JKS + ssl_x_keystore_x_password: changeit diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 49a0a9bbd..c2fca70db 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -26,14 +26,14 @@ so-kafka: - environment: - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G - extra_hosts: - {% for node in KAFKANODES %} + {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} - {% endfor %} - {% if DOCKER.containers['so-kafka'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %} + {% endfor %} + {% if DOCKER.containers['so-kafka'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %} - {{ XTRAHOST }} - 
{% endfor %} - {% endif %} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} - {{ BINDING }} @@ -43,8 +43,11 @@ so-kafka: - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties - watch: - - file: kafka_kraft_server_properties + {% for sc in ['server', 'client'] %} + - file: kafka_kraft_{{sc}}_properties + {% endfor %} delete_so-kafka_so-status.disabled: file.uncomment: diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja new file mode 100644 index 000000000..9f01904e4 --- /dev/null +++ b/salt/kafka/etc/client.properties.jinja @@ -0,0 +1,2 @@ +{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} +{{ KAFKACONFIG.client }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 486feb214..a18262ac2 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,128 +1,2 @@ -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. Setting this puts us in KRaft mode -process.roles=broker,controller - -# The node id associated with this instance's roles -node.id={{ kafka_nodeid }} - -# The connect string for the controller quorum -controller.quorum.voters={{ kraft_controller_quorum_voters }} - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. 
-# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), -# with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 - -# using 0.0.0.0 eliminates issues with binding to 9092 and 9093 in initial testing -listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 -#listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 - -auto.create.topics.enable=true - -# Name of listener used for communication between brokers. -inter.broker.listener.name=BROKER - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=BROKER://{{ kafka_ip }}:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL - -#SSL configuration -ssl.keystore.location=/etc/pki/kafka.jks -ssl.keystore.password=changeit -ssl.keystore.type=JKS -ssl.truststore.location=/etc/pki/java/sos/cacerts -ssl.truststore.password=changeit - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/nsm/kafka/data - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
-offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 +{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} +{{ KAFKACONFIG.server }} diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja new file mode 100644 index 000000000..f0e389e4a --- /dev/null +++ b/salt/kafka/map.jinja @@ -0,0 +1,15 @@ +{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} +{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set KAFKAMERGED.config.server.node_x_id = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} +{% set KAFKAMERGED.config.server.advertised_x_listeners = 'BROKER://' ~ GLOBALS.node_ip ~ ':9092' %} + +{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set combined = [] %} +{% for hostname, data in nodes.items() %} + {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} +{% endfor %} +{% set kraft_controller_quorum_voters = ','.join(combined) %} + +{% set KAFKAMERGED.config.server.controller_x_quorum_x_voters = kraft_controller_quorum_voters %} From 7aa00faa6c37bbe87ab4c4ed36647a9c1c25f6be Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 09:31:54 -0400 Subject: [PATCH 369/777] fix var --- salt/kafka/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 523681ba0..1666e65ae 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -95,7 +95,7 @@ kafka_keystore_perms: - group: 939 {% for sc in ['server', 'client'] %} -kafka_kraft_{{type}}_properties: +kafka_kraft_{{sc}}_properties: file.managed: - source: salt://kafka/etc/{{sc}}.properties.jinja - name: /opt/so/conf/kafka/{{sc}}.properties From c48436ccbf2f542954b2e46585c005a3203d27be Mon Sep 17 
00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 10:19:17 -0400 Subject: [PATCH 370/777] fix dict update --- salt/kafka/map.jinja | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index f0e389e4a..f1da7ec95 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -2,8 +2,8 @@ {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set KAFKAMERGED.config.server.node_x_id = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} -{% set KAFKAMERGED.config.server.advertised_x_listeners = 'BROKER://' ~ GLOBALS.node_ip ~ ':9092' %} +{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %} +{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %} {% set nodes = salt['pillar.get']('kafka:nodes', {}) %} {% set combined = [] %} @@ -12,4 +12,4 @@ {% endfor %} {% set kraft_controller_quorum_voters = ','.join(combined) %} -{% set KAFKAMERGED.config.server.controller_x_quorum_x_voters = kraft_controller_quorum_voters %} +{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %} From daa5342986b2bf5cf7b3106888d4a0cf89048008 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 10:22:05 -0400 Subject: [PATCH 371/777] items not keys in for loop --- salt/kafka/config.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index ab43d84a9..33f9f8387 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -2,7 +2,7 @@ {% from 'kafka/map.jinja' import KAFKAMERGED %} {% set KAFKACONFIG = {} %} -{% for k, v in KAFKAMERGED.config.keys() %} +{% for k, v in KAFKAMERGED.config.items() %} {% do KAFKACONFIG.update({k | replace("_x_", "."): v}) 
%} {% endfor %} From d38051e806c505bc1023291f90197b7c5dab7500 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 10:36:37 -0400 Subject: [PATCH 372/777] fix client and server properties formatting --- salt/kafka/etc/client.properties.jinja | 4 ++-- salt/kafka/etc/server.properties.jinja | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja index 9f01904e4..1353ac491 100644 --- a/salt/kafka/etc/client.properties.jinja +++ b/salt/kafka/etc/client.properties.jinja @@ -1,2 +1,2 @@ -{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} -{{ KAFKACONFIG.client }} +{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} +{{ KAFKACONFIG.client | yaml(False) }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index a18262ac2..b02730faa 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,2 +1,2 @@ -{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} -{{ KAFKACONFIG.server }} +{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} +{{ KAFKACONFIG.server | yaml(False) }} From bd5fe4328561017b87d78493867d1f410e83ffc1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 11:07:53 -0400 Subject: [PATCH 373/777] jinja config files --- salt/kafka/config.map.jinja | 8 -------- salt/kafka/config.sls | 9 --------- salt/kafka/etc/client.properties.jinja | 4 ++-- salt/kafka/etc/server.properties.jinja | 4 ++-- 4 files changed, 4 insertions(+), 21 deletions(-) delete mode 100644 salt/kafka/config.map.jinja diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja deleted file mode 100644 index 33f9f8387..000000000 --- a/salt/kafka/config.map.jinja +++ /dev/null @@ -1,8 +0,0 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'kafka/map.jinja' import KAFKAMERGED %} - -{% set KAFKACONFIG = {} %} -{% for k, v in KAFKAMERGED.config.items() %} -{% 
do KAFKACONFIG.update({k | replace("_x_", "."): v}) %} -{% endfor %} - diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 1666e65ae..c856c4f80 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -10,16 +10,8 @@ {% set kafka_ips_logstash = [] %} {% set kafka_ips_kraft = [] %} {% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} {% set kafka_ip = GLOBALS.node_ip %} -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set combined = [] %} -{% for hostname, data in nodes.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - {# Create list for kafka <-> logstash/searchnode communcations #} {% for node, node_data in kafkanodes.items() %} {% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} @@ -32,7 +24,6 @@ {% endfor %} {% set kraft_server_list = "','".join(kafka_ips_kraft) %} - include: - ssl diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja index 1353ac491..0245c3c42 100644 --- a/salt/kafka/etc/client.properties.jinja +++ b/salt/kafka/etc/client.properties.jinja @@ -1,2 +1,2 @@ -{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} -{{ KAFKACONFIG.client | yaml(False) }} +{% from 'kafka/map.jinja' import KAFKAMERGED -%} +{{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index b02730faa..90a80063f 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,2 +1,2 @@ -{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} -{{ KAFKACONFIG.server | yaml(False) }} +{% from 'kafka/map.jinja' import KAFKAMERGED -%} +{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }} From 2206553e038a8c6fb32b0c680b96b79f1137c583 Mon Sep 
17 00:00:00 2001 From: Mike Reeves Date: Wed, 10 Apr 2024 09:49:21 -0400 Subject: [PATCH 374/777] Update analyst.json --- salt/elasticsearch/roles/analyst.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/roles/analyst.json b/salt/elasticsearch/roles/analyst.json index 2fd10ab47..90ff95ad4 100644 --- a/salt/elasticsearch/roles/analyst.json +++ b/salt/elasticsearch/roles/analyst.json @@ -27,7 +27,8 @@ "monitor", "read", "read_cross_cluster", - "view_index_metadata" + "view_index_metadata", + "write" ] } ], From 86b984001d4088eb24449f522ded6c6ebc616f37 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 10 Apr 2024 10:39:06 -0400 Subject: [PATCH 375/777] annotations and enable/disable from ui --- salt/kafka/disabled.sls | 16 +++ salt/kafka/etc/client.properties.jinja | 5 + salt/kafka/etc/server.properties.jinja | 5 + salt/kafka/init.sls | 8 +- salt/kafka/map.jinja | 5 + salt/kafka/soc_kafka.yaml | 164 +++++++++++++++++++++++++ 6 files changed, 201 insertions(+), 2 deletions(-) create mode 100644 salt/kafka/disabled.sls create mode 100644 salt/kafka/soc_kafka.yaml diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls new file mode 100644 index 000000000..6658f0c5e --- /dev/null +++ b/salt/kafka/disabled.sls @@ -0,0 +1,16 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +include: + - kafka.sostatus + +so-kafka: + docker_container.absent: + - force: True + +so-kafka_so-status.disabled: + file.comment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja index 0245c3c42..91ff5f7c2 100644 --- a/salt/kafka/etc/client.properties.jinja +++ b/salt/kafka/etc/client.properties.jinja @@ -1,2 +1,7 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + {% from 'kafka/map.jinja' import KAFKAMERGED -%} {{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 90a80063f..df5632ba9 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,2 +1,7 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + {% from 'kafka/map.jinja' import KAFKAMERGED -%} {{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 903c66867..b4a6a28b0 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -3,7 +3,11 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
-{# Create map.jinja to enable / disable kafka from UI #} -{# Temporarily just enable kafka #} +{% from 'kafka/map.jinja' import KAFKAMERGED %} + include: +{% if KAFKAMERGED.enabled %} - kafka.enabled +{% else %} + - kafka.disabled +{% endif %} diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index f1da7ec95..771e6102b 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -1,3 +1,8 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + {% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} {% from 'vars/globals.map.jinja' import GLOBALS %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml new file mode 100644 index 000000000..c16ff212e --- /dev/null +++ b/salt/kafka/soc_kafka.yaml @@ -0,0 +1,164 @@ +kafka: + enabled: + description: Enable or disable Kafka. + helpLink: kafka.html + config: + server: + advertised_x_listeners: + description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication. + title: advertised.listeners + helpLink: kafka.html + auto_x_create_x_topics_x_enable: + description: Enable the auto creation of topics. + title: auto.create.topics.enable + forcedType: bool + helpLink: kafka.html + controller_x_listener_x_names: + description: Set listeners used by the controller in a comma-seperated list. + title: controller.listener.names + helpLink: kafka.html + controller_x_quorum_x_voters: + description: A comma-seperated list of ID and endpoint information mapped for a set of voters. 
+        title: controller.quorum.voters
+        helpLink: kafka.html
+      inter_x_broker_x_listener_x_name:
+        description: The name of the listener used for inter-broker communication.
+        title: inter.broker.listener.name
+        helpLink: kafka.html
+      listeners:
+        description: Set of URIs that is listened on and the listener names in a comma-separated list.
+        helpLink: kafka.html
+      listener_x_security_x_protocol_x_map:
+        description: Comma-separated mapping of listener name and security protocols.
+        title: listener.security.protocol.map
+        helpLink: kafka.html
+      log_x_dirs:
+        description: Where Kafka logs are stored within the Docker container.
+        title: log.dirs
+        helpLink: kafka.html
+      log_x_retention_x_check_x_interval_x_ms:
+        description: Frequency at which log files are checked if they are qualified for deletion.
+        title: log.retention.check.interval.ms
+        helpLink: kafka.html
+      log_x_retention_x_hours:
+        description: How long, in hours, a log file is kept.
+        title: log.retention.hours
+        forcedType: int
+        helpLink: kafka.html
+      log_x_segment_x_bytes:
+        description: The maximum allowable size for a log file.
+        title: log.segment.bytes
+        forcedType: int
+        helpLink: kafka.html
+      node_x_id:
+        description: The node ID corresponds to the roles performed by this process whenever process.roles is populated.
+        title: node.id
+        forcedType: int
+        readonly: True
+        helpLink: kafka.html
+      num_x_io_x_threads:
+        description: The number of threads used by Kafka.
+        title: num.io.threads
+        forcedType: int
+        helpLink: kafka.html
+      num_x_network_x_threads:
+        description: The number of threads used for network communication.
+        title: num.network.threads
+        forcedType: int
+        helpLink: kafka.html
+      num_x_partitions:
+        description: The number of log partitions assigned per topic.
+        title: num.partitions
+        forcedType: int
+        helpLink: kafka.html
+      num_x_recovery_x_threads_x_per_x_data_x_dir:
+        description: The number of threads used for log recovery at startup and purging at shutdown. 
This amount of threads is used per data directory.
+        title: num.recovery.threads.per.data.dir
+        forcedType: int
+        helpLink: kafka.html
+      offsets_x_topic_x_replication_x_factor:
+        description: The offsets topic replication factor.
+        title: offsets.topic.replication.factor
+        forcedType: int
+        helpLink: kafka.html
+      process_x_roles:
+        description: The roles the process performs. Use a comma-separated list if multiple.
+        title: process.roles
+        helpLink: kafka.html
+      socket_x_receive_x_buffer_x_bytes:
+        description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default.
+        title: socket.receive.buffer.bytes
+        #forcedType: int - soc needs to allow -1 as an int before we can use this
+        helpLink: kafka.html
+      socket_x_request_x_max_x_bytes:
+        description: The maximum bytes allowed for a request to the socket.
+        title: socket.request.max.bytes
+        forcedType: int
+        helpLink: kafka.html
+      socket_x_send_x_buffer_x_bytes:
+        description: Size, in bytes of the SO_SNDBUF buffer. A value of -1 will use the OS default.
+        title: socket.send.buffer.bytes
+        #forcedType: int - soc needs to allow -1 as an int before we can use this
+        helpLink: kafka.html
+      ssl_x_keystore_x_location:
+        description: The key store file location within the Docker container.
+        title: ssl.keystore.location
+        helpLink: kafka.html
+      ssl_x_keystore_x_password:
+        description: The key store file password. Invalid for PEM format.
+        title: ssl.keystore.password
+        sensitive: True
+        helpLink: kafka.html
+      ssl_x_keystore_x_type:
+        description: The key store file format.
+        title: ssl.keystore.type
+        regex: ^(JKS|PKCS12|PEM)$
+        helpLink: kafka.html
+      ssl_x_truststore_x_location:
+        description: The trust store file location within the Docker container.
+        title: ssl.truststore.location
+        helpLink: kafka.html
+      ssl_x_truststore_x_password:
+        description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format. 
+ title: ssl.truststore.password + sensitive: True + helpLink: kafka.html + transaction_x_state_x_log_x_min_x_isr: + description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write. + title: transaction.state.log.min.isr + forcedType: int + helpLink: kafka.html + transaction_x_state_x_log_x_replication_x_factor: + description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite. + title: transaction.state.log.replication.factor + forcedType: int + helpLink: kafka.html + client: + security_x_protocol: + description: Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT + title: security.protocol + regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT) + helpLink: kafka.html + ssl_x_keystore_x_location: + description: The key store file location within the Docker container. + title: ssl.keystore.location + helpLink: kafka.html + ssl_x_keystore_x_password: + description: The key store file password. Invalid for PEM format. + title: ssl.keystore.password + sensitive: True + helpLink: kafka.html + ssl_x_keystore_x_type: + description: The key store file format. 
+ title: ssl.keystore.type + regex: ^(JKS|PKCS12|PEM)$ + helpLink: kafka.html + ssl_x_truststore_x_location: + description: The trust store file location within the Docker container. + title: ssl.truststore.location + helpLink: kafka.html + ssl_x_truststore_x_password: + description: The trust store file password. If null, the trust store file is still use, but integrity checking is disabled. Invalid for PEM format. + title: ssl.truststore.password + sensitive: True + helpLink: kafka.html From d3bd56b131dfe372a04e012bc337333c0faca2bd Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 10 Apr 2024 14:13:27 -0400 Subject: [PATCH 376/777] disable logstash and redis if kafka enabled --- salt/kafka/soc_kafka.yaml | 2 +- salt/logstash/init.sls | 3 ++- salt/redis/init.sls | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index c16ff212e..2fec8c302 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -135,7 +135,7 @@ kafka: helpLink: kafka.html client: security_x_protocol: - description: Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT + description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT' title: security.protocol regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT) helpLink: kafka.html diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 62b2a2ebb..f7adc1330 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -4,9 +4,10 @@ # Elastic License 2.0. 
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %} +{% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{% if LOGSTASH_MERGED.enabled %} +{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} - logstash.enabled {% else %} - logstash.disabled diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 2f7f38dcc..4936c3254 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -4,9 +4,10 @@ # Elastic License 2.0. {% from 'redis/map.jinja' import REDISMERGED %} +{% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{% if REDISMERGED.enabled %} +{% if REDISMERGED.enabled and not KAFKAMERGED.enabled %} - redis.enabled {% else %} - redis.disabled From 4097e1d81ab07f4f9e9b3e55599b9b66b63ae7a1 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Apr 2024 16:10:27 -0400 Subject: [PATCH 377/777] Create mappings for Kismet integration Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/defaults.yaml | 4 ++ .../templates/component/ecs/device.json | 36 ++++++++++++++++++ .../templates/component/ecs/kismet.json | 22 +++++++++-- .../templates/component/ecs/network.json | 37 +++++++++++++++++++ 4 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 salt/elasticsearch/templates/component/ecs/device.json diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 048dd0c7f..db1255dad 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -10496,6 +10496,10 @@ elasticsearch: index_template: composed_of: - kismet-mappings + - source-mappings + - client-mappings + - device-mappings + - network-mappings - so-fleet_globals-1 - so-fleet_agent_id_verification-1 data_stream: diff --git a/salt/elasticsearch/templates/component/ecs/device.json b/salt/elasticsearch/templates/component/ecs/device.json new file mode 100644 index 000000000..a281f2c1e --- /dev/null +++ 
b/salt/elasticsearch/templates/component/ecs/device.json @@ -0,0 +1,36 @@ +{ + "_meta": { + "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html", + "ecs_version": "1.12.2" + }, + "template": { + "mappings": { + "properties": { + "device": { + "properties": { + "id": { + "ignore_above": 1024, + "type": "keyword" + }, + "manufacturer": { + "ignore_above": 1024, + "type": "keyword" + }, + "model": { + "properties": { + "identifier": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/component/ecs/kismet.json b/salt/elasticsearch/templates/component/ecs/kismet.json index d388b7127..a03236ab8 100644 --- a/salt/elasticsearch/templates/component/ecs/kismet.json +++ b/salt/elasticsearch/templates/component/ecs/kismet.json @@ -6,9 +6,25 @@ "template": { "mappings": { "properties": { - "kismet_mapping_placeholder": { - "type": "keyword", - "ignore_above": 1024 + "kismet": { + "properties": { + "alerts": { + "properties": { + "count": { + "type": "long" + } + } + }, + "first_seen": { + "type": "date" + }, + "last_seen": { + "type": "date" + }, + "seenby": { + "type": "nested" + } + } } } } diff --git a/salt/elasticsearch/templates/component/ecs/network.json b/salt/elasticsearch/templates/component/ecs/network.json index c2e35efd0..cc0f9d288 100644 --- a/salt/elasticsearch/templates/component/ecs/network.json +++ b/salt/elasticsearch/templates/component/ecs/network.json @@ -77,6 +77,43 @@ "type": "keyword" } } + }, + "wireless": { + "properties": { + "associated_clients": { + "ignore_above": 1024, + "type": "keyword" + }, + "bssid": { + "ignore_above": 1024, + "type": "keyword" + }, + "channel": { + "ignore_above": 1024, + "type": "keyword" + }, + "channel_utilization": { + "type": "float" + }, + "frequency": { + "type": "double" + }, + "ssid": { + "ignore_above": 1024, + "type": "keyword" + 
}, + "ssid_cloaked": { + "type": "boolean" + }, + "known_connected_bssid": { + "ignore_above": 1024, + "type": "keyword" + }, + "last_connected_bssid": { + "ignore_above": 1024, + "type": "keyword" + } + } } } } From 2ab9cbba6131c508ee27293e759b6e62753c323d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Apr 2024 16:12:22 -0400 Subject: [PATCH 378/777] Update wording for Kismet poll interval annotation Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticfleet/soc_elasticfleet.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 206febcd7..7ed97e6ec 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -87,7 +87,7 @@ elasticfleet: advanced: True forcedType: string poll_interval: - description: Poll interval for wireless device data from Kismet. Integration is currently configured to report devices seen as active by any Kismet sensor within the last 600 seconds of polling. + description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes. 
global: True helpLink: elastic-fleet.html advanced: True From 7124f041388310e191c30fa97336c5f8f94c278c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Apr 2024 16:13:06 -0400 Subject: [PATCH 379/777] Update ingest pipelines to match updated mappings Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/kismet.ad_hoc | 2 +- salt/elasticsearch/files/ingest/kismet.ap | 14 +++++++------- salt/elasticsearch/files/ingest/kismet.bridged | 2 +- salt/elasticsearch/files/ingest/kismet.client | 4 ++-- salt/elasticsearch/files/ingest/kismet.common | 4 ++-- salt/elasticsearch/files/ingest/kismet.wds_ap | 4 ++-- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/salt/elasticsearch/files/ingest/kismet.ad_hoc b/salt/elasticsearch/files/ingest/kismet.ad_hoc index 8cbc9cd2b..adfbd7901 100644 --- a/salt/elasticsearch/files/ingest/kismet.ad_hoc +++ b/salt/elasticsearch/files/ingest/kismet.ad_hoc @@ -3,7 +3,7 @@ { "rename": { "field": "message2.kismet_device_base_macaddr", - "target_field": "wireless.bssid" + "target_field": "network.wireless.bssid" } } ] diff --git a/salt/elasticsearch/files/ingest/kismet.ap b/salt/elasticsearch/files/ingest/kismet.ap index 1b8cbb80e..107f924fd 100644 --- a/salt/elasticsearch/files/ingest/kismet.ap +++ b/salt/elasticsearch/files/ingest/kismet.ap @@ -3,35 +3,35 @@ { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked", - "target_field": "wireless.ssid_cloaked", + "target_field": "network.wireless.ssid_cloaked", "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null" } }, { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid", - "target_field": "wireless.ssid", + "target_field": "network.wireless.ssid", "if": 
"ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null" } }, { "set": { - "field": "wireless.ssid", + "field": "network.wireless.ssid", "value": "Hidden", - "if": "ctx?.wireless?.ssid_cloaked != null && ctx?.wireless?.ssid_cloaked == 1" + "if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1" } }, { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc", - "target_field": "wireless.channel_utilization", + "target_field": "network.network.wireless.channel_utilization", "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null" } }, { "rename": { "field": "message2.dot11_device.dot11_device_last_bssid", - "target_field": "wireless.bssid" + "target_field": "network.wireless.bssid" } }, { @@ -39,7 +39,7 @@ "field": "message2.dot11_device.dot11_device_associated_client_map", "processor": { "append": { - "field": "wireless.associated_clients", + "field": "network.wireless.associated_clients", "value": "{{_ingest._key}}" } }, diff --git a/salt/elasticsearch/files/ingest/kismet.bridged b/salt/elasticsearch/files/ingest/kismet.bridged index 5eee3b78c..b61635e3a 100644 --- a/salt/elasticsearch/files/ingest/kismet.bridged +++ b/salt/elasticsearch/files/ingest/kismet.bridged @@ -9,7 +9,7 @@ { "rename": { "field": "message2.dot11_device.dot11_device_last_bssid", - "target_field": "wireless.bssid" + "target_field": "network.wireless.bssid" } } ] diff --git a/salt/elasticsearch/files/ingest/kismet.client b/salt/elasticsearch/files/ingest/kismet.client index 8b3d3069b..6da0a071b 100644 --- a/salt/elasticsearch/files/ingest/kismet.client +++ b/salt/elasticsearch/files/ingest/kismet.client @@ -9,7 +9,7 @@ { "rename": { "field": "message2.dot11_device.dot11_device_last_bssid", - "target_field": "wireless.last_connected_bssid", + 
"target_field": "network.wireless.last_connected_bssid", "if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null" } }, @@ -18,7 +18,7 @@ "field": "message2.dot11_device.dot11_device_client_map", "processor": { "append": { - "field": "wireless.known_connected_bssid", + "field": "network.wireless.known_connected_bssid", "value": "{{_ingest._key}}" } }, diff --git a/salt/elasticsearch/files/ingest/kismet.common b/salt/elasticsearch/files/ingest/kismet.common index 95eb29b73..368e7601a 100644 --- a/salt/elasticsearch/files/ingest/kismet.common +++ b/salt/elasticsearch/files/ingest/kismet.common @@ -73,14 +73,14 @@ { "rename": { "field": "message2.kismet_device_base_channel", - "target_field": "wireless.channel", + "target_field": "network.wireless.channel", "if": "ctx?.message2?.kismet_device_base_channel != ''" } }, { "rename": { "field": "message2.kismet_device_base_frequency", - "target_field": "wireless.frequency", + "target_field": "network.wireless.frequency", "if": "ctx?.message2?.kismet_device_base_frequency != 0" } }, diff --git a/salt/elasticsearch/files/ingest/kismet.wds_ap b/salt/elasticsearch/files/ingest/kismet.wds_ap index 7f43d43fd..4d10b211b 100644 --- a/salt/elasticsearch/files/ingest/kismet.wds_ap +++ b/salt/elasticsearch/files/ingest/kismet.wds_ap @@ -3,7 +3,7 @@ { "rename": { "field": "message2.kismet_device_base_commonname", - "target_field": "wireless.bssid" + "target_field": "network.wireless.bssid" } }, { @@ -11,7 +11,7 @@ "field": "message2.dot11_device.dot11_device_associated_client_map", "processor": { "append": { - "field": "wireless.associated_clients", + "field": "network.wireless.associated_clients", "value": "{{_ingest._key}}" } }, From ed97aa4e78e69c2da298d8881f3627c4dc4e25b5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 11 Apr 2024 08:21:20 -0400 Subject: [PATCH 380/777] Enable Detections Adv by default --- salt/soc/defaults.yaml | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) 
diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml
index e1d4d1726..ac1fc1993 100644
--- a/salt/soc/defaults.yaml
+++ b/salt/soc/defaults.yaml
@@ -2088,6 +2088,7 @@ soc:
           - red
       customEnabled: false
     detections:
+      advanced: true
       viewEnabled: true
       createLink: /detection/create
       eventFetchLimit: 500
@@ -2113,23 +2114,35 @@ soc:
         - soc_timestamp
       queries:
         - name: "All Detections"
-          query: "_id:*"
+          query: "_id:* | groupby so_detection.language | groupby so_detection.ruleset so_detection.isEnabled"
+          description: Show all Detections, community and custom
        - name: "Custom Detections"
-          query: "so_detection.isCommunity:false"
+          query: "so_detection.isCommunity:false AND NOT so_detection.ruleset: securityonion-resources"
+          description: Show all custom detections
        - name: "All Detections - Enabled"
-          query: "so_detection.isEnabled:true"
+          query: "so_detection.isEnabled:true | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity"
+          description: Show all enabled Detections
        - name: "All Detections - Disabled"
-          query: "so_detection.isEnabled:false"
+          query: "so_detection.isEnabled:false | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity"
+          description: Show all disabled Detections
        - name: "Detection Type - Suricata (NIDS)"
-          query: "so_detection.language:suricata"
+          query: "so_detection.language:suricata | groupby so_detection.ruleset so_detection.isEnabled"
+          description: Show all NIDS Detections, which are run with Suricata
        - name: "Detection Type - Sigma (Elastalert) - All"
-          query: "so_detection.language:sigma"
+          query: "so_detection.language:sigma | groupby so_detection.ruleset so_detection.isEnabled"
+          description: Show all Sigma Detections, which are run with Elastalert
        - name: "Detection Type - Sigma (Elastalert) - Windows"
-          query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"'
+          query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*" | groupby 
so_detection.ruleset so_detection.isEnabled' + description: Show all Sigma Detections with a logsource of Windows - name: "Detection Type - YARA (Strelka)" - query: "so_detection.language:yara" + query: "so_detection.language:yara | groupby so_detection.ruleset so_detection.isEnabled" + description: Show all YARA detections, which are used by Strelka - name: "Security Onion - Grid Detections" query: "so_detection.ruleset:securityonion-resources" + description: Show Detections for this Security Onion Grid + - name: "Detections with Overrides" + query: "_exists_:so_detection.overrides | groupby so_detection.language | groupby so_detection.ruleset so_detection.isEnabled" + description: Show Detections that have Overrides detection: presets: severity: From 1c5f02ade28b0f18d2cb69675d0447cf2be0305e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 11 Apr 2024 09:21:08 -0400 Subject: [PATCH 381/777] Update annotations --- salt/soc/soc_soc.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index b550b62c5..ed9acc47a 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -293,6 +293,7 @@ soc: alerts: *appSettings cases: *appSettings dashboards: *appSettings + detections: *appSettings grid: maxUploadSize: description: The maximum number of bytes for an uploaded PCAP import file. 
From fd689a4607a8f416b8cbf86905647cd8940ec47c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:18:04 -0400 Subject: [PATCH 382/777] Fix typo in ingest pipeline Test to fix duplicate events in SOC, by removing conflicting field event.created Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/kismet.ap | 2 +- salt/elasticsearch/files/ingest/kismet.common | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/kismet.ap b/salt/elasticsearch/files/ingest/kismet.ap index 107f924fd..a864c09e4 100644 --- a/salt/elasticsearch/files/ingest/kismet.ap +++ b/salt/elasticsearch/files/ingest/kismet.ap @@ -24,7 +24,7 @@ { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc", - "target_field": "network.network.wireless.channel_utilization", + "target_field": "network.wireless.channel_utilization", "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null" } }, diff --git a/salt/elasticsearch/files/ingest/kismet.common b/salt/elasticsearch/files/ingest/kismet.common index 368e7601a..14d439105 100644 --- a/salt/elasticsearch/files/ingest/kismet.common +++ b/salt/elasticsearch/files/ingest/kismet.common @@ -149,7 +149,8 @@ "device_type", "wifi", "agent", - "host" + "host", + "event.created" ], "ignore_failure": true } From 68e016090b2f456d62d280ffb4441ac47f62c2d0 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 13:21:54 -0400 Subject: [PATCH 383/777] Fix network.wireless.ssid not parsing Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/templates/component/ecs/network.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/salt/elasticsearch/templates/component/ecs/network.json b/salt/elasticsearch/templates/component/ecs/network.json index cc0f9d288..8cc6bdc37 100644 --- a/salt/elasticsearch/templates/component/ecs/network.json +++ b/salt/elasticsearch/templates/component/ecs/network.json @@ -103,7 +103,7 @@ "type": "keyword" }, "ssid_cloaked": { - "type": "boolean" + "type": "integer" }, "known_connected_bssid": { "ignore_above": 1024, From c269fb90acc4b56cfb2814105ec0e746de9808d6 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:41:54 -0400 Subject: [PATCH 384/777] Added a Kismet Wifi devices dashboard for an overview of kismet data Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/soc/defaults.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 987011c99..0826f4957 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1180,6 +1180,13 @@ soc: - soc_timestamp - event.dataset - message + ':kismet:': + - soc_timestamp + - device.manufacturer + - client.mac + - network.wireless.ssid + - network.wireless.bssid + - event.dataset server: bindAddress: 0.0.0.0:9822 baseUrl: / @@ -1819,6 +1826,9 @@ soc: - name: GeoIP - Source Organizations description: GeoIP tagged logs visualized by source organizations query: '* AND _exists_:source_geo.organization_name | groupby source_geo.organization_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.geo.country_name | groupby event.dataset | groupby event.module' + - name: Kismet - WiFi Devices + description: WiFi devices seen by Kismet sensors + query: 'event.module: kismet | groupby network.wireless.ssid | groupby device.manufacturer | groupby -pie device.manufacturer | groupby event.dataset' job: alerts: advanced: false From af53dcda1bc064c7dba0a4fb1927007e7280c2ab Mon Sep 17 00:00:00 2001 From: 
reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:32:00 -0400 Subject: [PATCH 385/777] Remove references to kafkanode Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../assigned_hostgroups.local.map.yaml | 3 +- pillar/kafka/nodes.sls | 2 +- pillar/logstash/nodes.sls | 2 +- pillar/top.sls | 9 +- salt/allowed_states.map.jinja | 14 +-- salt/firewall/containers.map.jinja | 6 +- salt/firewall/defaults.yaml | 92 +------------------ salt/firewall/soc_firewall.yaml | 62 ------------- salt/kafka/enabled.sls | 2 +- salt/logstash/config.sls | 2 +- salt/logstash/defaults.yaml | 4 - salt/logstash/enabled.sls | 2 +- .../config/so/0800_input_kafka.conf.jinja | 4 +- salt/logstash/soc_logstash.yaml | 2 - salt/manager/tools/sbin/so-firewall-minion | 3 - salt/manager/tools/sbin/so-minion | 5 - salt/ssl/init.sls | 5 +- salt/top.sls | 9 -- setup/so-functions | 4 +- setup/so-whiptail | 3 - 20 files changed, 17 insertions(+), 218 deletions(-) diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index fca293d3a..025b32131 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -19,5 +19,4 @@ role: receiver: standalone: searchnode: - sensor: - kafkanode: \ No newline at end of file + sensor: \ No newline at end of file diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index b1842834c..6fe64685d 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,4 +1,4 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-receiver or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} {% set existing_ids = [] %} diff 
--git a/pillar/logstash/nodes.sls b/pillar/logstash/nodes.sls index 99fbb857c..a77978821 100644 --- a/pillar/logstash/nodes.sls +++ b/pillar/logstash/nodes.sls @@ -2,7 +2,7 @@ {% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %} {% for minionid, ip in salt.saltutil.runner( 'mine.get', - tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet or G@role:so-kafkanode ', + tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ', fun='network.ip_addrs', tgt_type='compound') | dictsort() %} diff --git a/pillar/top.sls b/pillar/top.sls index 61f4f338f..817767bf7 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -233,15 +233,8 @@ base: - redis.adv_redis - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - '*_kafkanode': - - logstash.nodes - - logstash.soc_logstash - - logstash.adv_logstash - - minions.{{ grains.id }} - - minions.adv_{{ grains.id }} - - secrets - kafka.nodes + - secrets '*_import': - secrets diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 6fa60c2ea..0fa968658 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -188,16 +188,8 @@ 'telegraf', 'firewall', 'schedule', - 'docker_clean' - ], - 'so-kafkanode': [ - 'kafka', - 'logstash', - 'ssl', - 'telegraf', - 'firewall', - 'schedule', - 'docker_clean' + 'docker_clean', + 'kafka' ], 'so-desktop': [ 'ssl', @@ -214,7 +206,7 @@ {% do allowed_states.append('strelka') %} {% endif %} - {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %} + {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %} {% do allowed_states.append('elasticsearch') %} {% endif 
%} diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 7efb9abab..02a1b7cac 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -81,11 +81,7 @@ {% set NODE_CONTAINERS = [ 'so-logstash', 'so-redis', -] %} -{% elif GLOBALS.role == 'so-kafkanode' %} -{% set NODE_CONTAINERS = [ - 'so-logstash', - 'so-kafka', + 'so-kafka' ] %} {% elif GLOBALS.role == 'so-idh' %} diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index e51bf5825..0b6d06eda 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -19,7 +19,6 @@ firewall: manager: [] managersearch: [] receiver: [] - kafkanode: [] searchnode: [] self: [] sensor: [] @@ -443,15 +442,6 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni - kafkanode: - portgroups: - - yum - - docker_registry - - influxdb - - elastic_agent_control - - elastic_agent_data - - elastic_agent_update - - sensoroni analyst: portgroups: - nginx @@ -530,9 +520,6 @@ firewall: receiver: portgroups: - salt_manager - kafkanode: - portgroups: - - salt_manager desktop: portgroups: - salt_manager @@ -647,15 +634,6 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni - kafkanode: - portgroups: - - yum - - docker_registry - - influxdb - - elastic_agent_control - - elastic_agent_data - - elastic_agent_update - - sensoroni analyst: portgroups: - nginx @@ -1305,14 +1283,17 @@ firewall: - beats_5044 - beats_5644 - elastic_agent_data + - kafka searchnode: portgroups: - redis - beats_5644 + - kafka managersearch: portgroups: - redis - beats_5644 + - kafka self: portgroups: - redis @@ -1383,73 +1364,6 @@ firewall: portgroups: [] customhostgroup9: portgroups: [] - kafkanode: - chain: - DOCKER-USER: - hostgroups: - searchnode: - portgroups: - - kafka - kafkanode: - portgroups: - - kafka - customhostgroup0: - portgroups: [] - customhostgroup1: - portgroups: [] - customhostgroup2: - portgroups: [] - customhostgroup3: - 
portgroups: [] - customhostgroup4: - portgroups: [] - customhostgroup5: - portgroups: [] - customhostgroup6: - portgroups: [] - customhostgroup7: - portgroups: [] - customhostgroup8: - portgroups: [] - customhostgroup9: - portgroups: [] - INPUT: - hostgroups: - anywhere: - portgroups: - - ssh - dockernet: - portgroups: - - all - localhost: - portgroups: - - all - self: - portgroups: - - syslog - syslog: - portgroups: - - syslog - customhostgroup0: - portgroups: [] - customhostgroup1: - portgroups: [] - customhostgroup2: - portgroups: [] - customhostgroup3: - portgroups: [] - customhostgroup4: - portgroups: [] - customhostgroup5: - portgroups: [] - customhostgroup6: - portgroups: [] - customhostgroup7: - portgroups: [] - customhostgroup8: - portgroups: [] - customhostgroup9: - portgroups: [] idh: chain: DOCKER-USER: diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 3e4c4355f..28791a705 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -34,7 +34,6 @@ firewall: heavynode: *hostgroupsettings idh: *hostgroupsettings import: *hostgroupsettings - kafkanode: *hostgroupsettings localhost: *ROhostgroupsettingsadv manager: *hostgroupsettings managersearch: *hostgroupsettings @@ -361,8 +360,6 @@ firewall: portgroups: *portgroupsdocker endgame: portgroups: *portgroupsdocker - kafkanode: - portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -454,8 +451,6 @@ firewall: portgroups: *portgroupsdocker syslog: portgroups: *portgroupsdocker - kafkanode: - portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -940,63 +935,6 @@ firewall: portgroups: *portgroupshost customhostgroup9: portgroups: *portgroupshost - kafkanode: - chain: - DOCKER-USER: - hostgroups: - searchnode: - portgroups: *portgroupsdocker - kafkanode: - portgroups: *portgroupsdocker - customhostgroup0: - portgroups: *portgroupsdocker - customhostgroup1: - portgroups: *portgroupsdocker - 
customhostgroup2: - portgroups: *portgroupsdocker - customhostgroup3: - portgroups: *portgroupsdocker - customhostgroup4: - portgroups: *portgroupsdocker - customhostgroup5: - portgroups: *portgroupsdocker - customhostgroup6: - portgroups: *portgroupsdocker - customhostgroup7: - portgroups: *portgroupsdocker - customhostgroup8: - portgroups: *portgroupsdocker - customhostgroup9: - portgroups: *portgroupsdocker - INPUT: - hostgroups: - anywhere: - portgroups: *portgroupshost - dockernet: - portgroups: *portgroupshost - localhost: - portgroups: *portgroupshost - customhostgroup0: - portgroups: *portgroupshost - customhostgroup1: - portgroups: *portgroupshost - customhostgroup2: - portgroups: *portgroupshost - customhostgroup3: - portgroups: *portgroupshost - customhostgroup4: - portgroups: *portgroupshost - customhostgroup5: - portgroups: *portgroupshost - customhostgroup6: - portgroups: *portgroupshost - customhostgroup7: - portgroups: *portgroupshost - customhostgroup8: - portgroups: *portgroupshost - customhostgroup9: - portgroups: *portgroupshost - idh: chain: DOCKER-USER: diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index c2fca70db..ed26297b3 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -7,7 +7,7 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} include: - kafka.sostatus diff --git a/salt/logstash/config.sls b/salt/logstash/config.sls index 402d1ef20..8a59c83b7 100644 --- a/salt/logstash/config.sls +++ b/salt/logstash/config.sls @@ -12,7 +12,7 @@ include: - ssl - {% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %} + {% if GLOBALS.role not in ['so-receiver','so-fleet'] %} - elasticsearch {% endif %} diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index 
3ca4570fd..348acb622 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -19,8 +19,6 @@ logstash: - search fleet: - fleet - kafkanode: - - kafkanode defined_pipelines: fleet: - so/0012_input_elastic_agent.conf.jinja @@ -39,8 +37,6 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - so/9900_output_endgame.conf.jinja - kafkanode: - - so/0899_output_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index fcc2ec190..3881ef1f4 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -75,7 +75,7 @@ so-logstash: {% else %} - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% endif %} - {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %} + {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 1391ce983..85e6729e2 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,11 +1,9 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %} {% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %} {% set broker_ips = [] %} {% for node, node_data in kafka_brokers.items() %} {% do broker_ips.append(node_data['ip'] + ":9092") %} {% endfor %} - -{# For testing kafka stuff from manager not 
dedicated kafkanodes #} {% for node, node_data in kafka_on_mngr.items() %} {% do broker_ips.append(node_data['ip'] + ":9092") %} {% endfor %} diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index 82fb25bec..3172ff7c5 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -16,7 +16,6 @@ logstash: manager: *assigned_pipelines managersearch: *assigned_pipelines fleet: *assigned_pipelines - kafkanode: *assigned_pipelines defined_pipelines: receiver: &defined_pipelines description: List of pipeline configurations assign to this group. @@ -27,7 +26,6 @@ logstash: fleet: *defined_pipelines manager: *defined_pipelines search: *defined_pipelines - kafkanode: *defined_pipelines custom0: *defined_pipelines custom1: *defined_pipelines custom2: *defined_pipelines diff --git a/salt/manager/tools/sbin/so-firewall-minion b/salt/manager/tools/sbin/so-firewall-minion index 3357e5185..66a0afcea 100755 --- a/salt/manager/tools/sbin/so-firewall-minion +++ b/salt/manager/tools/sbin/so-firewall-minion @@ -79,9 +79,6 @@ fi 'RECEIVER') so-firewall includehost receiver "$IP" --apply ;; - 'KAFKANODE') - so-firewall includehost kafkanode "$IP" --apply - ;; 'DESKTOP') so-firewall includehost desktop "$IP" --apply ;; diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 7b3e6fd3e..34e069ece 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -565,11 +565,6 @@ function createRECEIVER() { add_telegraf_to_minion } -function createKAFKANODE() { - add_logstash_to_minion - # add_telegraf_to_minion -} - function createDESKTOP() { add_desktop_to_minion add_telegraf_to_minion diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 90f9cc64f..f337d62cb 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -664,10 +664,7 @@ elastickeyperms: {%- endif %} -# Roles will need to be modified. Below is just for testing encrypted kafka pipelines -# Remove so-manager. 
Just inplace for testing -{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %} -# Create a cert for Redis encryption +{% if grains['role'] in ['so-manager', 'so-searchnode', 'so-receiver'] %} kafka_key: x509.private_key_managed: - name: /etc/pki/kafka.key diff --git a/salt/top.sls b/salt/top.sls index 289dd462b..ec5e4d738 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -235,16 +235,7 @@ base: - firewall - logstash - redis - - elasticfleet.install_agent_grid - - '*_kafkanode and G@saltversion:{{saltversion}}': - - match: compound - kafka - - logstash - - ssl - - telegraf - - firewall - - docker_clean - elasticfleet.install_agent_grid '*_idh and G@saltversion:{{saltversion}}': diff --git a/setup/so-functions b/setup/so-functions index 070711d63..a669c52fc 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1143,7 +1143,7 @@ get_redirect() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP' | 'KAFKANODE') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; esac @@ -1505,8 +1505,6 @@ process_installtype() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true - elif [ "$install_type" = 'KAFKANODE' ]; then - is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then is_desktop=true fi diff --git a/setup/so-whiptail b/setup/so-whiptail index a732a9c97..fd9625ec4 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -681,7 +681,6 @@ whiptail_install_type_dist_existing() { "HEAVYNODE" "Sensor + Search Node " \ "IDH" "Intrusion Detection Honeypot Node " \ "RECEIVER" "Receiver Node " \ - "KAFKANODE" "Kafka Broker + Kraft controller" \ 3>&1 1>&2 2>&3 # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" 
\ # TODO # "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO @@ -712,8 +711,6 @@ whiptail_install_type_dist_existing() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true - elif [ "$install_type" = 'KAFKANODE' ]; then - is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then is_desktop=true fi From ca7253a5896f2a028021339a04dccd9eabb3b863 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:38:03 -0400 Subject: [PATCH 386/777] Run kafka-clusterid script when pillar values are missing Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/so-kafka-clusterid | 8 +++++--- setup/so-functions | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 719973247..fcbe3ba42 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -16,7 +16,9 @@ fi if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then kafka_cluster_id=$(get_random_value 22) echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls -else - echo 'kafka_cluster_id exists' - salt-call pillar.get secrets +fi + +if ! 
grep -q "^ kafkapass:" $local_salt_dir/pillar/secrets.sls; then + kafkapass=$(get_random_value) + echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/secrets.sls fi \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index a669c52fc..176349edb 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1116,6 +1116,7 @@ generate_passwords(){ SOCSRVKEY=$(get_random_value 64) IMPORTPASS=$(get_random_value) KAFKACLUSTERID=$(get_random_value 22) + KAFKAPASS=$(get_random_value) } generate_interface_vars() { @@ -1947,7 +1948,8 @@ secrets_pillar(){ "secrets:"\ " import_pass: $IMPORTPASS"\ " influx_pass: $INFLUXPASS"\ - " kafka_cluster_id: $KAFKACLUSTERID" > $local_salt_dir/pillar/secrets.sls + " kafka_cluster_id: $KAFKACLUSTERID"\ + " kafka_pass: $KAFKAPASS" > $local_salt_dir/pillar/secrets.sls fi } From 6b28dc72e86ee51476b5e95dedd28361223435ce Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:38:33 -0400 Subject: [PATCH 387/777] Update annotation for global.pipeline Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/global/soc_global.yaml | 3 +-- salt/kafka/config.sls | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index daaf28b28..667bf7826 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -36,9 +36,8 @@ global: global: True advanced: True pipeline: - description: Sets which pipeline technology for events to use. Currently only Redis is supported. + description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license. global: True - readonly: True advanced: True repo_host: description: Specify the host where operating system packages will be served from. 
diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c856c4f80..c9e028ff5 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -26,7 +26,7 @@ include: - ssl - +g kafka_group: group.present: - name: kafka From 39555873729731bbf79ee3ebbe32d7c4c01e9ad9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 16:20:09 -0400 Subject: [PATCH 388/777] Use global.pipeline for redis / kafka states Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/global/defaults.yaml | 3 ++- salt/global/soc_global.yaml | 2 ++ salt/kafka/init.sls | 3 ++- salt/manager/tools/sbin/soup | 3 ++- salt/redis/init.sls | 4 ++-- salt/vars/kafkanode.map.jinja | 1 - setup/so-functions | 1 - 7 files changed, 10 insertions(+), 7 deletions(-) delete mode 100644 salt/vars/kafkanode.map.jinja diff --git a/salt/global/defaults.yaml b/salt/global/defaults.yaml index bd7244a58..5daa942c8 100644 --- a/salt/global/defaults.yaml +++ b/salt/global/defaults.yaml @@ -1,2 +1,3 @@ global: - pcapengine: STENO \ No newline at end of file + pcapengine: STENO + pipeline: REDIS \ No newline at end of file diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index 667bf7826..5a349a3c3 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -37,6 +37,8 @@ global: advanced: True pipeline: description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license. + regex: ^(REDIS|KAFKA)$ + regexFailureMessage: You must enter either REDIS or KAFKA. global: True advanced: True repo_host: diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index b4a6a28b0..acedba3c3 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -4,9 +4,10 @@ # Elastic License 2.0. 
{% from 'kafka/map.jinja' import KAFKAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} include: -{% if KAFKAMERGED.enabled %} +{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %} - kafka.enabled {% else %} - kafka.disabled diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fa3c3b5ee..3ca353856 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,7 +438,8 @@ post_to_2.4.60() { } post_to_2.4.70() { - echo "Nothing to apply" + echo "Removing global.pipeline pillar configuration" + sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls POSTVERSION=2.4.70 } diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 4936c3254..7142c92c3 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -4,10 +4,10 @@ # Elastic License 2.0. {% from 'redis/map.jinja' import REDISMERGED %} -{% from 'kafka/map.jinja' import KAFKAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} include: -{% if REDISMERGED.enabled and not KAFKAMERGED.enabled %} +{% if GLOBALS.pipeline == "REDIS" and REDISMERGED.enabled %} - redis.enabled {% else %} - redis.disabled diff --git a/salt/vars/kafkanode.map.jinja b/salt/vars/kafkanode.map.jinja deleted file mode 100644 index 396cefcc9..000000000 --- a/salt/vars/kafkanode.map.jinja +++ /dev/null @@ -1 +0,0 @@ -{% set ROLE_GLOBALS = {} %} \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index 176349edb..038a4deb4 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1327,7 +1327,6 @@ create_global() { # Continue adding other details echo " imagerepo: '$IMAGEREPO'" >> $global_pillar_file - echo " pipeline: 'redis'" >> $global_pillar_file echo " repo_host: '$HOSTNAME'" >> $global_pillar_file echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file echo " registry_host: '$HOSTNAME'" >> $global_pillar_file From f514e5e9bba49f21b6e57ce41a40aceef8710c28 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 
11 Apr 2024 16:23:05 -0400 Subject: [PATCH 389/777] add kafka to receiver --- salt/top.sls | 9 --------- 1 file changed, 9 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 289dd462b..e4cd067c3 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -236,16 +236,7 @@ base: - logstash - redis - elasticfleet.install_agent_grid - - '*_kafkanode and G@saltversion:{{saltversion}}': - - match: compound - kafka - - logstash - - ssl - - telegraf - - firewall - - docker_clean - - elasticfleet.install_agent_grid '*_idh and G@saltversion:{{saltversion}}': - match: compound From 49ccd86c399296700ae5b956df0f30abd71ec36d Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 12 Apr 2024 08:35:44 -0400 Subject: [PATCH 390/777] Fix fingerprint paths --- salt/soc/defaults.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ac1fc1993..cf51c07fd 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1258,7 +1258,7 @@ soc: elastAlertRulesFolder: /opt/sensoroni/elastalert reposFolder: /opt/sensoroni/sigma/repos rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint - stateFilePath: /opt/so/conf/soc/fingerprints/elastalertengine.state + stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-resources license: Elastic-2.0 @@ -1319,7 +1319,7 @@ soc: - repo: https://github.com/Security-Onion-Solutions/securityonion-yara license: DRL yaraRulesFolder: /opt/sensoroni/yara/rules - stateFilePath: /opt/so/conf/soc/fingerprints/strelkaengine.state + stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state suricataengine: allowRegex: '' autoUpdateEnabled: true @@ -1327,7 +1327,7 @@ soc: communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint - stateFilePath: 
/opt/so/conf/soc/fingerprints/suricataengine.state + stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state client: enableReverseLookup: false docsUrl: /docs/ From a54a72c2696f32b9d565efce7d9748b3bd148070 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:19:20 -0400 Subject: [PATCH 391/777] move kafka_cluster_id to kafka:cluster_id --- pillar/top.sls | 4 +++- salt/allowed_states.map.jinja | 20 +++++++------------- salt/kafka/soc_kafka.yaml | 6 ++++++ salt/kafka/storage.sls | 15 ++++++++------- salt/manager/tools/sbin/so-kafka-clusterid | 11 ++++++----- salt/ssl/init.sls | 4 ++-- 6 files changed, 32 insertions(+), 28 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index 61f4f338f..170b3f759 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -61,7 +61,7 @@ base: - backup.adv_backup - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - kafka.nodes + - kafka.* - stig.soc_stig '*_sensor': @@ -177,6 +177,7 @@ base: - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - stig.soc_stig + - kafka.* '*_heavynode': - elasticsearch.auth @@ -233,6 +234,7 @@ base: - redis.adv_redis - minions.{{ grains.id }} - minions.adv_{{ grains.id }} + - kafka.* '*_kafkanode': - logstash.nodes diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 6fa60c2ea..091cb3786 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -123,7 +123,8 @@ 'utility', 'schedule', 'docker_clean', - 'stig' + 'stig', + 'kafka' ], 'so-searchnode': [ 'ssl', @@ -157,7 +158,8 @@ 'schedule', 'tcpreplay', 'docker_clean', - 'stig' + 'stig', + 'kafka' ], 'so-sensor': [ 'ssl', @@ -188,16 +190,8 @@ 'telegraf', 'firewall', 'schedule', - 'docker_clean' - ], - 'so-kafkanode': [ - 'kafka', - 'logstash', - 'ssl', - 'telegraf', - 'firewall', - 'schedule', - 'docker_clean' + 'docker_clean', + 'kafka' ], 'so-desktop': [ 'ssl', @@ -214,7 +208,7 @@ {% do allowed_states.append('strelka') %} {% endif %} - {% if grains.role in ['so-eval', 
'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %} + {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %} {% do allowed_states.append('elasticsearch') %} {% endif %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 2fec8c302..8a6c516a9 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -2,6 +2,12 @@ kafka: enabled: description: Enable or disable Kafka. helpLink: kafka.html + cluster_id: + description: The ID of the Kafka cluster. + readonly: True + advanced: True + sensitive: True + helpLink: kafka.html config: server: advertised_x_listeners: diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index e99455e3d..fbb7c7328 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -6,17 +6,18 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id', default=None) %} +{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %} -{% if kafka_cluster_id is none %} +{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} +{% if kafka_cluster_id is none %} generate_kafka_cluster_id: cmd.run: - name: /usr/sbin/so-kafka-clusterid -{% endif %} +{% endif %} +{% endif %} {# Initialize kafka storage if it doesn't already exist. 
Just looking for meta.properties in /nsm/kafka/data #} -{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} -{% else %} +{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} kafka_storage_init: cmd.run: - name: | @@ -25,7 +26,7 @@ kafka_rm_kafkainit: cmd.run: - name: | docker rm so-kafkainit -{% endif %} +{% endif %} {% else %} @@ -34,4 +35,4 @@ kafka_rm_kafkainit: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 719973247..def454372 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -13,10 +13,11 @@ else source $(dirname $0)/../../../common/tools/sbin/so-common fi -if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then +if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls + echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls else - echo 'kafka_cluster_id exists' - salt-call pillar.get secrets -fi \ No newline at end of file + echo 'kafka:cluster_id pillar exists' + salt-call pillar.get kafka:cluster_id +fi diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 90f9cc64f..0aa06bc8e 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -666,7 +666,7 @@ elastickeyperms: # Roles will need to be modified. Below is just for testing encrypted kafka pipelines # Remove so-manager. 
Just inplace for testing -{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %} +{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %} # Create a cert for Redis encryption kafka_key: x509.private_key_managed: @@ -770,7 +770,7 @@ kafka_logstash_crt: - onchanges: - x509: /etc/pki/kafka-logstash.key -{% if grains['role'] in ['so-manager'] %} +{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %} kafka_client_key: x509.private_key_managed: - name: /etc/pki/kafka-client.key From 0ed9894b7e921924193df51d11c2572689f7376e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:19:46 -0400 Subject: [PATCH 392/777] create kratos local pillar dirs during setup --- setup/so-functions | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 070711d63..153a3371c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1115,7 +1115,6 @@ generate_passwords(){ REDISPASS=$(get_random_value) SOCSRVKEY=$(get_random_value 64) IMPORTPASS=$(get_random_value) - KAFKACLUSTERID=$(get_random_value 22) } generate_interface_vars() { @@ -1392,7 +1391,7 @@ make_some_dirs() { mkdir -p $local_salt_dir/salt/firewall/portgroups mkdir -p $local_salt_dir/salt/firewall/ports - for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do + for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global kafka;do mkdir -p $local_salt_dir/pillar/$THEDIR touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls @@ -1948,8 +1947,7 @@ secrets_pillar(){ printf '%s\n'\ "secrets:"\ " import_pass: $IMPORTPASS"\ - " influx_pass: $INFLUXPASS"\ - " 
kafka_cluster_id: $KAFKACLUSTERID" > $local_salt_dir/pillar/secrets.sls + " influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls fi } From fbd3cff90d6665dce961ed0b56ea6352526e128b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:21:19 -0400 Subject: [PATCH 393/777] Make global.pipeline use GLOBALMERGED value Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/vars/globals.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja index ed7129678..0a4995c0c 100644 --- a/salt/vars/globals.map.jinja +++ b/salt/vars/globals.map.jinja @@ -23,7 +23,7 @@ 'manager_ip': INIT.PILLAR.global.managerip, 'md_engine': INIT.PILLAR.global.mdengine, 'pcap_engine': GLOBALMERGED.pcapengine, - 'pipeline': INIT.PILLAR.global.pipeline, + 'pipeline': GLOBALMERGED.pipeline, 'so_version': INIT.PILLAR.global.soversion, 'so_docker_gateway': DOCKER.gateway, 'so_docker_range': DOCKER.range, From 04ddcd5c9328d090d4fe000eefef8af111abd7cb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:52:57 -0400 Subject: [PATCH 394/777] add receiver managersearch and standalone to kafka.nodes pillar --- pillar/kafka/nodes.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index b1842834c..447e7a35d 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,4 +1,4 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} {% set existing_ids = [] %} From 
d73ba7dd3e31a3ea1c3bb27788d78178f8c6cd86 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:55:26 -0400 Subject: [PATCH 395/777] order kafka pillar assignment --- pillar/top.sls | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index 170b3f759..fbb1604da 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -61,7 +61,9 @@ base: - backup.adv_backup - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - kafka.* + - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka - stig.soc_stig '*_sensor': @@ -177,7 +179,9 @@ base: - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - stig.soc_stig - - kafka.* + - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka '*_heavynode': - elasticsearch.auth @@ -234,16 +238,9 @@ base: - redis.adv_redis - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - kafka.* - - '*_kafkanode': - - logstash.nodes - - logstash.soc_logstash - - logstash.adv_logstash - - minions.{{ grains.id }} - - minions.adv_{{ grains.id }} - - secrets - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka '*_import': - secrets From a6ff92b099faa1c1a43def5e85331d084e18edc8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:11:18 -0400 Subject: [PATCH 396/777] Note to remove so-kafka-clusterid. 
Update soup and setup to generate needed kafka pillar values

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/manager/tools/sbin/so-kafka-clusterid |  5 +++++
 salt/manager/tools/sbin/soup               | 18 ++++++++++++++++++
 setup/so-functions                         | 13 +++++++++++++
 setup/so-variables                         |  6 ++++++
 4 files changed, 42 insertions(+)

diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid
index adddfe3ce..7ac055997 100644
--- a/salt/manager/tools/sbin/so-kafka-clusterid
+++ b/salt/manager/tools/sbin/so-kafka-clusterid
@@ -5,6 +5,11 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
+
+
+### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL BE GENERATED IN SETUP AND SOUP
+
+
 local_salt_dir=/opt/so/saltstack/local
 
 if [[ -f /usr/sbin/so-common ]]; then
diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup
index 3ca353856..a6f9032a5 100755
--- a/salt/manager/tools/sbin/soup
+++ b/salt/manager/tools/sbin/soup
@@ -438,8 +438,26 @@ post_to_2.4.60() {
 }
 
 post_to_2.4.70() {
+  # Global pipeline changes to REDIS or KAFKA
   echo "Removing global.pipeline pillar configuration"
   sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
+
+  # Kafka configuration
+  mkdir -p /opt/so/saltstack/local/pillar/kafka
+  touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
+  touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
+  echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
+
+  if ! grep -q "^  cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
+    kafka_cluster_id=$(get_random_value 22)
+    echo '  cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
+
+    if ! grep -q "^  certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
+      kafkapass=$(get_random_value)
+      echo '  certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
+    fi
+  fi
+
   POSTVERSION=2.4.70
 }
 
diff --git a/setup/so-functions b/setup/so-functions
index 2332ab94c..30e8fbfd6 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -803,6 +803,7 @@ create_manager_pillars() {
   patch_pillar
   nginx_pillar
   kibana_pillar
+  kafka_pillar
 }
 
 create_repo() {
@@ -1191,6 +1192,18 @@ kibana_pillar() {
   logCmd "touch $kibana_pillar_file"
 }
 
+kafka_pillar() {
+  KAFKACLUSTERID=$(get_random_value 22)
+  KAFKAPASS=$(get_random_value)
+  logCmd "mkdir -p $local_salt_dir/pillar/kafka"
+  logCmd "touch $adv_kafka_pillar_file"
+  logCmd "touch $kafka_pillar_file"
+  printf '%s\n'\
+    "kafka:"\
+    "  cluster_id: $KAFKACLUSTERID"\
+    "  certpass: $KAFKAPASS" > $kafka_pillar_file
+}
+
 logrotate_pillar() {
   logCmd "mkdir -p $local_salt_dir/pillar/logrotate"
   logCmd "touch $adv_logrotate_pillar_file"
diff --git a/setup/so-variables b/setup/so-variables
index 42ed8fc5c..4a2f29c58 100644
--- a/setup/so-variables
+++ b/setup/so-variables
@@ -178,6 +178,12 @@ export redis_pillar_file
 adv_redis_pillar_file="$local_salt_dir/pillar/redis/adv_redis.sls"
 export adv_redis_pillar_file
 
+kafka_pillar_file="$local_salt_dir/pillar/kafka/soc_kafka.sls"
+export kafka_pillar_file
+
+adv_kafka_pillar_file="$local_salt_dir/pillar/kafka/adv_kafka.sls"
+export adv_kafka_pillar_file
+
 idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls"
 export idh_pillar_file

From 911ee579a9e8a92b751695d12b7f449358df8260 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Fri, 12 Apr 2024 12:16:20 -0400
Subject: [PATCH 397/777] Typo

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/config.sls | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls
index c9e028ff5..c856c4f80 100644
--- a/salt/kafka/config.sls
+++ b/salt/kafka/config.sls
@@ -26,7 +26,7 @@ include:
   - ssl
 
-g
+
 kafka_group:
   group.present:
     - name: kafka

From f5e42e73af53b98387e7a56d2bc6571544949a4b Mon Sep 17 00:00:00 2001
From: DefensiveDepth
Date: Fri, 12 Apr 2024 13:30:20 -0400
Subject: [PATCH 398/777] Add docs for ruleset change

---
 salt/idstools/soc_idstools.yaml | 2 +-
 salt/soc/defaults.yaml          | 6 +++---
 salt/soc/soc_soc.yaml           | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml
index 3e3a68117..0a2bf0cbf 100644
--- a/salt/idstools/soc_idstools.yaml
+++ b/salt/idstools/soc_idstools.yaml
@@ -9,7 +9,7 @@ idstools:
     forcedType: string
     helpLink: rules.html
   ruleset:
-    description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
+    description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
global: True regex: ETPRO\b|ETOPEN\b helpLink: rules.html diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index cf51c07fd..b8fcf0581 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1253,7 +1253,7 @@ soc: - core+critical - securityonion-resources+critical - securityonion-resources+high - communityRulesImportFrequencySeconds: 86400 + communityRulesImportFrequencySeconds: 28800 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert reposFolder: /opt/sensoroni/sigma/repos @@ -1311,7 +1311,7 @@ soc: strelkaengine: allowRegex: '' autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 86400 + communityRulesImportFrequencySeconds: 28800 compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py denyRegex: '' reposFolder: /opt/sensoroni/yara/repos @@ -1323,7 +1323,7 @@ soc: suricataengine: allowRegex: '' autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 86400 + communityRulesImportFrequencySeconds: 28800 communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index ed9acc47a..42b80a3f0 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -110,7 +110,7 @@ soc: forcedType: "[]{}" helpLink: sigma.html sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). 
Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True advanced: False helpLink: sigma.html From c014508519d024ce6425955bdc1512f22991a22d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 13:50:25 -0400 Subject: [PATCH 399/777] need /opt/so/conf/ca/cacerts on receiver for kafka to run --- salt/allowed_states.map.jinja | 3 ++- salt/elasticsearch/ca.sls | 2 +- salt/kafka/enabled.sls | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 091cb3786..57cff5b4f 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -191,7 +191,8 @@ 'firewall', 'schedule', 'docker_clean', - 'kafka' + 'kafka', + 'elasticsearch.ca' ], 'so-desktop': [ 'ssl', diff --git a/salt/elasticsearch/ca.sls b/salt/elasticsearch/ca.sls index 5485bb676..188450311 100644 --- a/salt/elasticsearch/ca.sls +++ b/salt/elasticsearch/ca.sls @@ -4,7 +4,7 @@ # Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} +{% if sls.split('.')[0] in allowed_states or sls in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} # Move our new CA over so Elastic and Logstash can use SSL with the internal CA diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index ed26297b3..a42b6f18b 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -10,6 +10,7 @@ {% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} include: + - elasticsearch.ca - kafka.sostatus - kafka.config - kafka.storage From bb983d4ba2570c1764f4131743c82b98700d595a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 16:16:03 -0400 Subject: [PATCH 400/777] just broker as default process --- salt/kafka/defaults.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 7828f0536..91f55a07d 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -2,7 +2,7 @@ kafka: enabled: False config: server: - advertised_x_listeners: BROKER://10.66.166.231:9092 + advertised_x_listeners: auto_x_create_x_topics_x_enable: true controller_x_listener_x_names: CONTROLLER controller_x_quorum_x_voters: @@ -13,13 +13,13 @@ kafka: log_x_retention_x_check_x_interval_x_ms: 300000 log_x_retention_x_hours: 168 log_x_segment_x_bytes: 1073741824 - node_x_id: + node_x_id: num_x_io_x_threads: 8 num_x_network_x_threads: 3 num_x_partitions: 1 num_x_recovery_x_threads_x_per_x_data_x_dir: 1 offsets_x_topic_x_replication_x_factor: 1 - process_x_roles: broker,controller + process_x_roles: broker socket_x_receive_x_buffer_x_bytes: 102400 socket_x_request_x_max_x_bytes: 104857600 socket_x_send_x_buffer_x_bytes: 102400 From de6ea29e3b155de07278dbdee1ff1f63e758f19f Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:18:53 -0400 Subject: [PATCH 401/777] 
update default process.role to broker only Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 7828f0536..3da14acb4 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -19,7 +19,7 @@ kafka: num_x_partitions: 1 num_x_recovery_x_threads_x_per_x_data_x_dir: 1 offsets_x_topic_x_replication_x_factor: 1 - process_x_roles: broker,controller + process_x_roles: broker socket_x_receive_x_buffer_x_bytes: 102400 socket_x_request_x_max_x_bytes: 104857600 socket_x_send_x_buffer_x_bytes: 102400 From c4994a208b9f0746cc7b3867f8623faba41d718a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 15 Apr 2024 11:37:21 -0400 Subject: [PATCH 402/777] restart salt minion if a manager and signing policies change --- salt/salt/minion.sls | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 2cc032745..8c6f7f019 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -13,6 +13,9 @@ include: - systemd.reload - repo.client - salt.mine_functions +{% if GLOBALS.role in GLOBALS.manager_roles %} + - ca +{% endif %} {% if INSTALLEDSALTVERSION|string != SALTVERSION|string %} @@ -98,5 +101,8 @@ salt_minion_service: - file: mine_functions {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %} - file: set_log_levels +{% endif %} +{% if GLOBALS.role in GLOBALS.manager_roles %} + - file: /etc/salt/minion.d/signing_policies.conf {% endif %} - order: last From dbfb178556fb7b7458d195120d0496434c3aa339 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 16 Apr 2024 12:22:53 -0400 Subject: [PATCH 403/777] Add test --- salt/soc/defaults.yaml | 2 ++ salt/soc/soc_soc.yaml | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index b8fcf0581..c39b7bd90 100644 --- a/salt/soc/defaults.yaml +++ 
b/salt/soc/defaults.yaml @@ -1310,6 +1310,8 @@ soc: - rbac/users_roles strelkaengine: allowRegex: '' + autoEnabledYaraRules: + - securityonion-yara autoUpdateEnabled: true communityRulesImportFrequencySeconds: 28800 compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 42b80a3f0..2001fb0c1 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -186,6 +186,11 @@ soc: global: True advanced: True helpLink: yara.html + autoEnabledYaraRules: + description: 'Yara rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' + global: True + advanced: True + helpLink: yara.html autoUpdateEnabled: description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.' global: True From 8cc4d2668ed5f47e1e984c2e2920d80fe33e6ef9 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 16 Apr 2024 12:52:14 -0400 Subject: [PATCH 404/777] Move compile_yara --- .../files/bin => strelka/compile_yara}/compile_yara.py | 5 +++++ salt/strelka/config.sls | 8 ++++++++ 2 files changed, 13 insertions(+) rename salt/{soc/files/bin => strelka/compile_yara}/compile_yara.py (58%) diff --git a/salt/soc/files/bin/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py similarity index 58% rename from salt/soc/files/bin/compile_yara.py rename to salt/strelka/compile_yara/compile_yara.py index 43c8b1a09..b840dcab6 100644 --- a/salt/soc/files/bin/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -1,3 +1,8 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0.
+ import os import yara import glob diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 929bef113..f6c66d8ff 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -14,6 +14,14 @@ strelkaconfdir: - group: 939 - makedirs: True +strelkacompileyara: + file.managed: + - name: /opt/so/conf/strelka/compile_yara.py + - source: salt://strelka/compile_yara/compile_yara.py + - user: 939 + - group: 939 + - makedirs: True + strelkarulesdir: file.directory: - name: /opt/so/conf/strelka/rules From ff284761913ed4881c9d98ba68e2022ebcdbb1e6 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 16 Apr 2024 13:10:17 -0400 Subject: [PATCH 405/777] Fix compile_yara path --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index c39b7bd90..6a1376478 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1314,7 +1314,7 @@ soc: - securityonion-yara autoUpdateEnabled: true communityRulesImportFrequencySeconds: 28800 - compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py + compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py denyRegex: '' reposFolder: /opt/sensoroni/yara/repos rulesRepos: From 4b79623ce31edfbf59763d41747ddb7a79fe7220 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 16 Apr 2024 16:51:35 -0400 Subject: [PATCH 406/777] watch pillar files for changes and do something --- salt/salt/engines/master/pillarWatch.py | 72 +++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 salt/salt/engines/master/pillarWatch.py diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py new file mode 100644 index 000000000..c45e2383d --- /dev/null +++ b/salt/salt/engines/master/pillarWatch.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +import logging +from time import sleep +import os +import salt.client +import re + +log = logging.getLogger(__name__) +local = 
salt.client.LocalClient() + +def start(fpa, interval=10): + log.info("##### PILLARWATCH STARTED #####") + + try: + # maybe change this location + dataFile = open("/opt/so/state/pillarWatch.txt", "r+") + df = dataFile.read() + log.info("df: %s" % str(df)) + except FileNotFoundError: + log.info("No previous pillarWatch data saved") + + currentValues = [] + + log.info("FPA: %s" % str(fpa)) + for i in fpa: + log.trace("files: %s" % i['files']) + log.trace("pillar: %s" % i['pillar']) + log.trace("action: %s" % i['action']) + pillarFiles = i['files'] + pillar = i['pillar'] + action = i['action'] + + patterns = pillar.split(".") + log.trace("pillar: %s" % pillar) + log.trace("patterns: %s" % patterns) + log.trace("patterns length: %i" % len(patterns)) + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 + for pillarFile in pillarFiles: + with open(pillarFile, "r") as file: + log.info("checking file: %s" % pillarFile) + for line in file: + log.trace("line: %s" % line) + log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) + # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar + # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look + # for pipeline. 
once pipeline is found, it will record the value + if re.search(patterns[patternFound], line): + log.trace("PILLARWATCH FOUND: %s" % patterns[patternFound]) + patternFound += 1 + # we have found the final key in the pillar that we are looking for, get the value + if patternFound == len(patterns): + for l in df.splitlines(): + if pillar in l: + previousPillarValue = l.split(":")[1] + log.info("%s previousPillarValue:%s" % (pillar, str(previousPillarValue))) + currentPillarValue = str(line.split(":")[1]).strip() + log.info("%s currentPillarValue: %s" % (pillar,currentPillarValue)) + if pillar in df: + df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) + #df = df.replace(pillar, pillar + ': ' + currentPillarValue) + else: + df = pillar + ': ' + currentPillarValue + log.info("df: %s" % df) + #currentValues.append(pillar + ":" + currentPillarValue) + # we have found the pillar so we dont need to loop throught the file anymore + break + dataFile.seek(0) + dataFile.write(df) + dataFile.truncate() + dataFile.close() From 67a57e9df75221c45e0704b4880af4906b177d9c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 17 Apr 2024 13:14:45 -0400 Subject: [PATCH 407/777] Update limited-analyst.json --- salt/elasticsearch/roles/limited-analyst.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/roles/limited-analyst.json b/salt/elasticsearch/roles/limited-analyst.json index 9186b732e..6511e5f44 100644 --- a/salt/elasticsearch/roles/limited-analyst.json +++ b/salt/elasticsearch/roles/limited-analyst.json @@ -13,7 +13,8 @@ "monitor", "read", "read_cross_cluster", - "view_index_metadata" + "view_index_metadata", + "write" ] } ], From 665b7197a6a1820d4ced3bd2da4be7e2000c057a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 17 Apr 2024 17:08:41 -0400 Subject: [PATCH 408/777] Update Kafka nodeid Update so-minion to include running kafka.nodes state to ensure nodeid is 
generated for new brokers Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/kafka/nodes.sls | 30 +------------ salt/kafka/enabled.sls | 7 ++- salt/kafka/files/managed_node_pillar.jinja | 7 +++ salt/kafka/nodes.map.jinja | 50 ++++++++++++++++++++++ salt/kafka/nodes.sls | 19 ++++++++ salt/manager/tools/sbin/so-minion | 4 ++ 6 files changed, 86 insertions(+), 31 deletions(-) create mode 100644 salt/kafka/files/managed_node_pillar.jinja create mode 100644 salt/kafka/nodes.map.jinja create mode 100644 salt/kafka/nodes.sls diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index 447e7a35d..ba14c219e 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,30 +1,2 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} -{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} - -{% set existing_ids = [] %} -{% for node in pillar_kafkanodes.values() %} - {% if node.get('id') %} - {% do existing_ids.append(node['nodeid']) %} - {% endif %} -{% endfor %} -{% set all_possible_ids = range(1, 256)|list %} - -{% set available_ids = [] %} -{% for id in all_possible_ids %} - {% if id not in existing_ids %} - {% do available_ids.append(id) %} - {% endif %} -{% endfor %} - -{% set final_nodes = pillar_kafkanodes.copy() %} - -{% for minionid, ip in current_kafkanodes.items() %} - {% set hostname = minionid.split('_')[0] %} - {% if hostname not in final_nodes %} - {% set new_id = available_ids.pop(0) %} - {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} - {% endif %} -{% endfor %} - kafka: - nodes: {{ final_nodes|tojson }} + nodes: \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index a42b6f18b..3c4f548f1 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -1,5 +1,5 @@ # 
Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. @@ -7,9 +7,12 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES as KAFKANODES %} include: + {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} + - kafka.nodes + {% endif %} - elasticsearch.ca - kafka.sostatus - kafka.config diff --git a/salt/kafka/files/managed_node_pillar.jinja b/salt/kafka/files/managed_node_pillar.jinja new file mode 100644 index 000000000..fb2ef410e --- /dev/null +++ b/salt/kafka/files/managed_node_pillar.jinja @@ -0,0 +1,7 @@ +kafka: + nodes: +{% for node, values in COMBINED_KAFKANODES.items() %} + {{ node }}: + ip: {{ values['ip'] }} + nodeid: {{ values['nodeid'] }} +{% endfor %} \ No newline at end of file diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja new file mode 100644 index 000000000..a6e36d36a --- /dev/null +++ b/salt/kafka/nodes.map.jinja @@ -0,0 +1,50 @@ +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} +{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} + +{% set existing_ids = [] %} + +{# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #} +{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{% for node, values in 
STORED_KAFKANODES.nodes.items() %} +{% if values.get('nodeid') %} +{% do existing_ids.append(values['nodeid']) %} +{% endif %} +{% endfor %} +{% endif %} + +{# Create list of possible node ids #} +{% set all_possible_ids = range(1, 65536)|list %} + +{# Don't like the below loop because the higher the range for all_possible_ids the more time spent on loop #} +{# Create list of available node ids by looping through all_possible_ids and ensuring it isn't in existing_ids #} +{% set available_ids = [] %} +{% for id in all_possible_ids %} +{% if id not in existing_ids %} +{% do available_ids.append(id) %} +{% endif %} +{% endfor %} + +{# Collect kafka eligible nodes and check if they're already in STORED_KAFKANODES to avoid potentially reassigning a nodeid #} +{% set NEW_KAFKANODES = {} %} +{% for minionid, ip in current_kafkanodes.items() %} +{% set hostname = minionid.split('_')[0] %} +{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} +{% set new_id = available_ids.pop(0) %} +{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} +{% endif %} +{% if hostname not in NEW_KAFKANODES %} +{% set new_id = available_ids.pop(0) %} +{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} +{% endif %} +{% endfor %} + +{# Combine STORED_KAFKANODES and NEW_KAFKANODES for writing to the pillar/kafka/nodes.sls #} +{% set COMBINED_KAFKANODES = {} %} +{% for node, details in NEW_KAFKANODES.items() %} +{% do COMBINED_KAFKANODES.update({node: details}) %} +{% endfor %} +{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{% for node, details in STORED_KAFKANODES.nodes.items() %} +{% do COMBINED_KAFKANODES.update({node: details}) %} +{% endfor %} +{% endif %} diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls new file mode 100644 index 000000000..5085c6cca --- /dev/null +++ b/salt/kafka/nodes.sls @@ -0,0 +1,19 @@ +# Copyright Security Onion Solutions LLC and/or licensed 
to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% if GLOBALS.pipeline == "KAFKA" %} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} + +{# Store kafka pillar in a file rather than memory where values could be lost. Kafka does not support nodeid's changing #} +write_kafka_pillar_yaml: + file.managed: + - name: /opt/so/saltstack/local/pillar/kafka/nodes.sls + - mode: 644 + - user: socore + - source: salt://kafka/files/managed_node_pillar.jinja + - template: jinja + - context: + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} +{% endif %} \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 34e069ece..72ae55209 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -616,6 +616,10 @@ function updateMineAndApplyStates() { salt $MINIONID state.apply elasticsearch queue=True --async salt $MINIONID state.apply soc queue=True --async fi + if [[ "$NODETYPE" == "RECEIVER" ]]; then + # Setup nodeid for Kafka + salt-call state.apply kafka.nodes queue=True + fi # run this async so the cli doesn't wait for a return salt "$MINION_ID" state.highstate --async queue=True } From 4caa6a10b586d733f02df376e715e9f4338fbe8e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 17 Apr 2024 18:09:04 -0400 Subject: [PATCH 409/777] watch a pillar in files and take action --- salt/salt/engines/master/pillarWatch.py | 47 +++++++++++++++++++------ salt/salt/files/engines.conf | 30 ++++++++++++++++ 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index c45e2383d..c6f343dc8 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ 
b/salt/salt/engines/master/pillarWatch.py @@ -5,6 +5,7 @@ from time import sleep import os import salt.client import re +from ast import literal_eval log = logging.getLogger(__name__) local = salt.client.LocalClient() @@ -12,24 +13,26 @@ local = salt.client.LocalClient() def start(fpa, interval=10): log.info("##### PILLARWATCH STARTED #####") + # try to open the file that stores the previous runs data + # if the file doesn't exist, create a blank one try: # maybe change this location dataFile = open("/opt/so/state/pillarWatch.txt", "r+") - df = dataFile.read() - log.info("df: %s" % str(df)) except FileNotFoundError: log.info("No previous pillarWatch data saved") + dataFile = open("/opt/so/state/pillarWatch.txt", "w+") - currentValues = [] + df = dataFile.read() + log.info("df: %s" % str(df)) log.info("FPA: %s" % str(fpa)) for i in fpa: log.trace("files: %s" % i['files']) log.trace("pillar: %s" % i['pillar']) - log.trace("action: %s" % i['action']) + log.trace("action: %s" % i['actions']) pillarFiles = i['files'] pillar = i['pillar'] - action = i['action'] + actions = i['actions'] patterns = pillar.split(".") log.trace("pillar: %s" % pillar) @@ -49,23 +52,45 @@ def start(fpa, interval=10): if re.search(patterns[patternFound], line): log.trace("PILLARWATCH FOUND: %s" % patterns[patternFound]) patternFound += 1 - # we have found the final key in the pillar that we are looking for, get the value + # we have found the final key in the pillar that we are looking for, get the previous value then the current value if patternFound == len(patterns): + # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars + previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' for l in df.splitlines(): if pillar in l: - previousPillarValue = l.split(":")[1] - log.info("%s previousPillarValue:%s" % (pillar, str(previousPillarValue))) + previousPillarValue = l.split(":")[1].strip() + log.info("%s 
previousPillarValue: %s" % (pillar, str(previousPillarValue))) currentPillarValue = str(line.split(":")[1]).strip() log.info("%s currentPillarValue: %s" % (pillar,currentPillarValue)) if pillar in df: df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) - #df = df.replace(pillar, pillar + ': ' + currentPillarValue) else: - df = pillar + ': ' + currentPillarValue + df += pillar + ': ' + currentPillarValue + '\n' log.info("df: %s" % df) - #currentValues.append(pillar + ":" + currentPillarValue) # we have found the pillar so we dont need to loop throught the file anymore break + if currentPillarValue != previousPillarValue: + log.info("cPV != pPV: %s != %s" % (currentPillarValue,previousPillarValue)) + if previousPillarValue in actions['from']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + elif '*' in actions['from']: + # need more logic here for to and from + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS='FROM TO NOT DEFINED' + #for f in actions: + log.info("actions: %s" % actions['from']) + log.info("ACTIONS: %s" % ACTIONS) + for action in ACTIONS: + log.info(action) + for saltModule, args in action.items(): + log.info(saltModule) + log.info(args) + # args=list(action.values())[0] + # log.info(args) + whatHappened = __salt__[saltModule](**args) + log.info("whatHappened: %s" % whatHappened) + dataFile.seek(0) dataFile.write(df) dataFile.truncate() diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 7c43e99e1..26be7bf37 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -4,3 +4,33 @@ engines_dirs: engines: - checkmine: interval: 60 + - pillarWatch: + fpa: + - files: + - /opt/so/saltstack/local/pillar/global/soc_global.sls + - /opt/so/saltstack/local/pillar/global/adv_global.sls + pillar: global.pipeline + actions: + from: + REDIS: + to: + KAFKA: + - cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls 
kafka.enabled True + KAFKA: + to: + REDIS: + - cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.ruleset + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + interval: 10 From 8d9aae198364cda0b0e9ae9daef9e3efc9c482f9 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 18 Apr 2024 09:28:30 -0400 Subject: [PATCH 410/777] FEATURE: Add queue=True to so-checkin so that it will wait for any running states #12815 --- salt/common/tools/sbin/so-checkin | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/so-checkin b/salt/common/tools/sbin/so-checkin index db35af410..13ea9acd9 100755 --- a/salt/common/tools/sbin/so-checkin +++ b/salt/common/tools/sbin/so-checkin @@ -5,8 +5,13 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. - - . /usr/sbin/so-common -salt-call state.highstate -l info +cat << EOF + +so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. 
For more information about so-checkin and salt, please see: +https://docs.securityonion.net/en/2.4/salt.html + +EOF + +salt-call state.highstate -l info queue=True From 506bbd314df0eff9fbd6a24f60135991e5270818 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 18 Apr 2024 10:26:10 -0400 Subject: [PATCH 411/777] more comments, better logging --- salt/salt/engines/master/pillarWatch.py | 96 +++++++++++++++---------- salt/salt/files/engines.conf | 4 +- 2 files changed, 60 insertions(+), 40 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index c6f343dc8..b19a6d709 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -1,17 +1,19 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ # -*- coding: utf-8 -*- import logging -from time import sleep -import os -import salt.client import re -from ast import literal_eval +import salt.client log = logging.getLogger(__name__) local = salt.client.LocalClient() def start(fpa, interval=10): - log.info("##### PILLARWATCH STARTED #####") + log.info("pillarWatch engine: started") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one @@ -19,77 +21,95 @@ def start(fpa, interval=10): # maybe change this location dataFile = open("/opt/so/state/pillarWatch.txt", "r+") except FileNotFoundError: - log.info("No previous pillarWatch data saved") + log.warn("pillarWatch engine: No previous pillarWatch data saved") dataFile = open("/opt/so/state/pillarWatch.txt", "w+") df = dataFile.read() - log.info("df: %s" % str(df)) - - log.info("FPA: %s" % str(fpa)) for i in fpa: - log.trace("files: %s" % i['files']) - log.trace("pillar: %s" % i['pillar']) - log.trace("action: %s" % i['actions']) + log.trace("pillarWatch engine: files: %s" % i['files']) + log.trace("pillarWatch engine: pillar: %s" % i['pillar']) + log.trace("pillarWatch engine: actions: %s" % i['actions']) pillarFiles = i['files'] pillar = i['pillar'] actions = i['actions'] - + # these are the keys that we are going to look for as we traverse the pillarFiles patterns = pillar.split(".") - log.trace("pillar: %s" % pillar) - log.trace("patterns: %s" % patterns) - log.trace("patterns length: %i" % len(patterns)) # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later patternFound = 0 for pillarFile in pillarFiles: with open(pillarFile, "r") as file: - log.info("checking file: %s" % pillarFile) + log.info("pillarWatch engine: checking file: %s" % pillarFile) for line in file: - log.trace("line: %s" % line) + log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) log.trace("pillarWatch engine: looking for: %s" % 
patterns[patternFound]) # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look # for pipeline. once pipeline is found, it will record the value - if re.search(patterns[patternFound], line): - log.trace("PILLARWATCH FOUND: %s" % patterns[patternFound]) + if re.search(patterns[patternFound] + ':', line): + # strip the newline because it makes the logs u-g-l-y + log.info("pillarWatch engine: found: %s" % line.strip('\n')) patternFound += 1 # we have found the final key in the pillar that we are looking for, get the previous value then the current value if patternFound == len(patterns): # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' + # check the contents of the dataFile that stores the previousPillarValue(s). + # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value for l in df.splitlines(): if pillar in l: - previousPillarValue = l.split(":")[1].strip() - log.info("%s previousPillarValue: %s" % (pillar, str(previousPillarValue))) + previousPillarValue = str(l.split(":")[1].strip()) + log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) currentPillarValue = str(line.split(":")[1]).strip() - log.info("%s currentPillarValue: %s" % (pillar,currentPillarValue)) + log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + # if the pillar we are checking for changes has been defined in the dataFile, + # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. 
if pillar in df: df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) else: df += pillar + ': ' + currentPillarValue + '\n' - log.info("df: %s" % df) + log.trace("pillarWatch engine: df: %s" % df) # we have found the pillar so we dont need to loop throught the file anymore break + # if the pillar value changed, then we find what actions we should take + log.info("pillarWatch engine: checking if currentPillarValue != previousPillarValue") + log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) if currentPillarValue != previousPillarValue: - log.info("cPV != pPV: %s != %s" % (currentPillarValue,previousPillarValue)) + log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) + # check if the previous pillar value is defined in the pillar from -> to actions if previousPillarValue in actions['from']: - ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # check if the new / current pillar value is defined under to + if currentPillarValue in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # if the new / current pillar value isn't defined under to, is there a wildcard defined + elif '*' in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to']['*'] + # no action was defined for us to take when we see the pillar change + else: + ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from elif '*' in actions['from']: + # is the new pillar value defined for the wildcard match + if currentPillarValue in actions['from']['*']['to']: + ACTIONS=actions['from']['*']['to'][currentPillarValue] + # if the new pillar 
doesn't have an action, was a wildcard defined + elif '*' in actions['from']['*']['to']: # need more logic here for to and from - ACTIONS=actions['from']['*']['to']['*'] + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard else: - ACTIONS='FROM TO NOT DEFINED' - #for f in actions: - log.info("actions: %s" % actions['from']) - log.info("ACTIONS: %s" % ACTIONS) + ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + log.info("pillarWatch engine: actions: %s" % actions['from']) + log.info("pillarWatch engine: ACTIONS: %s" % ACTIONS) for action in ACTIONS: - log.info(action) + log.info("pillarWatch engine: action: %s" % action) for saltModule, args in action.items(): - log.info(saltModule) - log.info(args) - # args=list(action.values())[0] - # log.info(args) - whatHappened = __salt__[saltModule](**args) - log.info("whatHappened: %s" % whatHappened) + log.info("pillarWatch engine: saltModule: %s" % saltModule) + log.info("pillarWatch engine: args: %s" % args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 26be7bf37..de5c1e43a 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -12,14 +12,14 @@ engines: pillar: global.pipeline actions: from: - REDIS: + '*': to: KAFKA: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True KAFKA: to: - REDIS: + '*': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False - files: From 610dd2c08d3907eac18b2a1841f7fc4d5d6665d1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 18 Apr 2024 11:11:14 -0400 Subject: [PATCH 412/777] improve it --- 
salt/salt/engines/master/pillarWatch.py | 27 ++++++++++++++----------- salt/salt/files/engines.conf | 8 ++++++++ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index b19a6d709..abbb632f8 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -13,7 +13,7 @@ log = logging.getLogger(__name__) local = salt.client.LocalClient() def start(fpa, interval=10): - log.info("pillarWatch engine: started") + log.info("pillarWatch engine: checking watched pillars for changes") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one @@ -26,6 +26,8 @@ def start(fpa, interval=10): df = dataFile.read() for i in fpa: + currentPillarValue = '' + previousPillarValue = '' log.trace("pillarWatch engine: files: %s" % i['files']) log.trace("pillarWatch engine: pillar: %s" % i['pillar']) log.trace("pillarWatch engine: actions: %s" % i['actions']) @@ -58,9 +60,9 @@ def start(fpa, interval=10): for l in df.splitlines(): if pillar in l: previousPillarValue = str(l.split(":")[1].strip()) - log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) currentPillarValue = str(line.split(":")[1]).strip() log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) # if the pillar we are checking for changes has been defined in the dataFile, # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. 
if pillar in df: @@ -86,7 +88,7 @@ def start(fpa, interval=10): ACTIONS=actions['from'][previousPillarValue]['to']['*'] # no action was defined for us to take when we see the pillar change else: - ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from elif '*' in actions['from']: # is the new pillar value defined for the wildcard match @@ -97,19 +99,20 @@ def start(fpa, interval=10): # need more logic here for to and from ACTIONS=actions['from']['*']['to']['*'] else: - ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard else: - ACTIONS='NO DEFINED ACTION FOR US TO TAKE' - log.info("pillarWatch engine: actions: %s" % actions['from']) - log.info("pillarWatch engine: ACTIONS: %s" % ACTIONS) + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + log.info("pillarWatch engine: all defined actions: %s" % actions['from']) + log.info("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) for action in ACTIONS: log.info("pillarWatch engine: action: %s" % action) - for saltModule, args in action.items(): - log.info("pillarWatch engine: saltModule: %s" % saltModule) - log.info("pillarWatch engine: args: %s" % args) - actionReturn = __salt__[saltModule](**args) - log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + if action != 'NO DEFINED ACTION FOR US TO TAKE': + for saltModule, args in action.items(): + log.info("pillarWatch engine: saltModule: %s" % saltModule) + log.info("pillarWatch engine: args: %s" % args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) 
dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index de5c1e43a..c15194eaf 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -17,11 +17,19 @@ engines: KAFKA: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True + - cmd.run: + cmd: salt-call saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.highstate & KAFKA: to: '*': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False + - cmd.run: + cmd: salt-call saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.highstate & - files: - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls From 6c6647629ca042b56ac989ee71fe7bfe10756a83 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 18 Apr 2024 11:32:17 -0400 Subject: [PATCH 413/777] Refactor yara for compilation --- salt/strelka/backend/enabled.sls | 2 +- salt/strelka/compile_yara/compile_yara.py | 62 ++++++++++++++++++++--- salt/strelka/config.sls | 2 +- salt/strelka/defaults.yaml | 2 +- 4 files changed, 57 insertions(+), 11 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 0df764a6e..a626924b1 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -17,7 +17,7 @@ strelka_backend: - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-backend:{{ GLOBALS.so_version }} - binds: - /opt/so/conf/strelka/backend/:/etc/strelka/:ro - - /opt/so/conf/strelka/rules/:/etc/yara/:ro + - /opt/so/conf/strelka/rules/compiled/:/etc/yara/:ro {% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} - {{ BIND }} diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index 
b840dcab6..2138c73d1 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -6,14 +6,60 @@ import os import yara import glob -import sys +import json +from concurrent.futures import ThreadPoolExecutor -def compile_yara_rules(rules_dir: str) -> None: - compiled_rules_path: str = os.path.join(rules_dir, "rules.yar.compiled") - rule_files: list[str] = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) +def check_syntax(rule_file): + try: + # Testing if compilation throws a syntax error, don't save the result + yara.compile(filepath=rule_file) + return (True, rule_file, None) + except yara.SyntaxError as e: + # Return the error message for logging purposes + return (False, rule_file, str(e)) - if rule_files: - rules: yara.Rules = yara.compile(filepaths={os.path.basename(f): f for f in rule_files}) - rules.save(compiled_rules_path) +def compile_yara_rules(rules_dir): + compiled_dir = os.path.join(rules_dir, "compiled") + compiled_rules_path = os.path.join(compiled_dir, "rules.compiled") + rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) + files_to_compile = {} + removed_count = 0 + success_count = 0 + + # Use ThreadPoolExecutor to parallelize syntax checks + with ThreadPoolExecutor() as executor: + results = executor.map(check_syntax, rule_files) + + # Collect yara files and prepare for batch compilation + for success, rule_file, error_message in results: + if success: + files_to_compile[os.path.basename(rule_file)] = rule_file + success_count += 1 + else: + # Extract just the UUID from the rule file name + rule_id = os.path.splitext(os.path.basename(rule_file))[0] + log_entry = { + "event.module": "soc", + "event.dataset": "soc.detections", + "log.level": "error", + "error.message": error_message, + "detection_type": "yara", + "rule.uuid": rule_id, + "error.type": "runtime_status" + } + with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file: + 
json.dump(log_entry, log_file) + log_file.write('\n') # Ensure new entries start on new lines + os.remove(rule_file) + removed_count += 1 -compile_yara_rules(sys.argv[1]) + # Compile all remaining valid rules into a single file + if files_to_compile: + compiled_rules = yara.compile(filepaths=files_to_compile) + compiled_rules.save(compiled_rules_path) + print(f"All remaining rules compiled and saved into {compiled_rules_path}") + + # Print summary of compilation results + print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") + +compile_yara_rules("/opt/sensoroni/yara/rules/") \ No newline at end of file diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index f6c66d8ff..90bba58a7 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -9,7 +9,7 @@ # Strelka config strelkaconfdir: file.directory: - - name: /opt/so/conf/strelka + - name: /opt/so/conf/strelka/rules/compiled/ - user: 939 - group: 939 - makedirs: True diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index da259fa14..f91ad8691 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -563,7 +563,7 @@ strelka: options: location: '/etc/yara/' compiled: - enabled: False + enabled: True filename: "rules.compiled" store_offset: True offset_meta_key: "StrelkaHexDump" From 229a9899143b555e1527a6e3dfdfa8cdb2ff67e2 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 18 Apr 2024 11:47:01 -0400 Subject: [PATCH 414/777] Update so-elasticsearch-cluster-space-total --- .../tools/sbin_jinja/so-elasticsearch-cluster-space-total | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total index 3faa2a7a9..b73967c26 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total +++ 
b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-total @@ -40,7 +40,7 @@ fi # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space {% if GLOBALS.role == 'so-manager' %} -for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do {% else %} for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do {% endif %} From 406dda60511015a215ddfae08ecb885aadd17389 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 18 Apr 2024 11:48:15 -0400 Subject: [PATCH 415/777] Update so-elasticsearch-cluster-space-used --- .../tools/sbin_jinja/so-elasticsearch-cluster-space-used | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used index 5d8a60e22..49e634853 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-cluster-space-used @@ -13,7 +13,7 @@ TOTAL_USED_SPACE=0 # Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space {% if GLOBALS.role == 'so-manager' %} # Get total disk space - disk.total -for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do {% else %} # Get disk space taken up by indices - disk.indices for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do From 5cc358de4ef20965b708436c6fba707684227760 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 
Apr 2024 11:58:25 -0400 Subject: [PATCH 416/777] Update map files to handle empty kafka:nodes pillar Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/map.jinja | 4 ++-- salt/kafka/nodes.map.jinja | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index 771e6102b..56f85144a 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -6,13 +6,13 @@ {% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} {% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} {% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %} {% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %} -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} {% set combined = [] %} -{% for hostname, data in nodes.items() %} +{% for hostname, data in COMBINED_KAFKANODES.items() %} {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} {% endfor %} {% set kraft_controller_quorum_voters = ','.join(combined) %} diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index a6e36d36a..5d74e9e1c 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -4,7 +4,8 @@ {% set existing_ids = [] %} {# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #} -{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} +{% if STORED_KAFKANODES != none %} {% for node, values in STORED_KAFKANODES.nodes.items() %} {% if values.get('nodeid') %} {% do existing_ids.append(values['nodeid']) %} @@ -28,7 +29,8 @@ {% set NEW_KAFKANODES = {} %} {% for minionid, ip in 
current_kafkanodes.items() %} {% set hostname = minionid.split('_')[0] %} -{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} +{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} #} +{% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.nodes %} {% set new_id = available_ids.pop(0) %} {% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} {% endif %} @@ -43,7 +45,8 @@ {% for node, details in NEW_KAFKANODES.items() %} {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} -{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} +{% if STORED_KAFKANODES != none %} {% for node, details in STORED_KAFKANODES.nodes.items() %} {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} From 1f6eb9cdc34e496979c25223c32721fb9ad838b6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 18 Apr 2024 13:50:37 -0400 Subject: [PATCH 417/777] match keys better. 
go through files reverse first found is prio --- salt/salt/engines/master/pillarWatch.py | 24 +++++++++++++++--------- salt/salt/files/engines.conf | 1 + 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index abbb632f8..9f85a07c4 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -26,8 +26,6 @@ def start(fpa, interval=10): df = dataFile.read() for i in fpa: - currentPillarValue = '' - previousPillarValue = '' log.trace("pillarWatch engine: files: %s" % i['files']) log.trace("pillarWatch engine: pillar: %s" % i['pillar']) log.trace("pillarWatch engine: actions: %s" % i['actions']) @@ -36,9 +34,12 @@ def start(fpa, interval=10): actions = i['actions'] # these are the keys that we are going to look for as we traverse the pillarFiles patterns = pillar.split(".") - # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later - patternFound = 0 - for pillarFile in pillarFiles: + # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file + for pillarFile in reversed(pillarFiles): + currentPillarValue = '' + previousPillarValue = '' + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 with open(pillarFile, "r") as file: log.info("pillarWatch engine: checking file: %s" % pillarFile) for line in file: @@ -47,7 +48,7 @@ def start(fpa, interval=10): # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look # for pipeline. 
once pipeline is found, it will record the value - if re.search(patterns[patternFound] + ':', line): + if re.search('^' + patterns[patternFound] + ':', line.strip()): # strip the newline because it makes the logs u-g-l-y log.info("pillarWatch engine: found: %s" % line.strip('\n')) patternFound += 1 @@ -70,8 +71,12 @@ def start(fpa, interval=10): else: df += pillar + ': ' + currentPillarValue + '\n' log.trace("pillarWatch engine: df: %s" % df) - # we have found the pillar so we dont need to loop throught the file anymore + # we have found the pillar so we dont need to loop through the file anymore break + # if key and value was found in the first file, then we don't want to look in + # any more files since we use the first file as the source of truth. + if patternFound == len(patterns): + break # if the pillar value changed, then we find what actions we should take log.info("pillarWatch engine: checking if currentPillarValue != previousPillarValue") log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) @@ -111,8 +116,9 @@ def start(fpa, interval=10): for saltModule, args in action.items(): log.info("pillarWatch engine: saltModule: %s" % saltModule) log.info("pillarWatch engine: args: %s" % args) - actionReturn = __salt__[saltModule](**args) - log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + __salt__[saltModule](**args) + #actionReturn = __salt__[saltModule](**args) + #log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index c15194eaf..3066f588c 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -6,6 +6,7 @@ engines: interval: 60 - pillarWatch: fpa: + # these files will be checked in reversed order to replicate the same hierarchy as the pillar top file - files: - /opt/so/saltstack/local/pillar/global/soc_global.sls - 
/opt/so/saltstack/local/pillar/global/adv_global.sls From fe81ffaf78929c0d83dc3d465d6153b4c5d66070 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:11:22 -0400 Subject: [PATCH 418/777] Variables no longer used. Replaced by map file Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c856c4f80..b1a31d23f 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -7,23 +7,6 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_ips_logstash = [] %} -{% set kafka_ips_kraft = [] %} -{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_ip = GLOBALS.node_ip %} - -{# Create list for kafka <-> logstash/searchnode communcations #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% set kafka_server_list = "','".join(kafka_ips_logstash) %} - -{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} -{% endfor %} -{% set kraft_server_list = "','".join(kafka_ips_kraft) %} - include: - ssl From 746128e37b214251716d85702239464e3e3d8a4c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:13:29 -0400 Subject: [PATCH 419/777] update so-kafka-clusterid This is a temporary script used to setup kafka secret and clusterid needed for kafka to start. 
This scripts functionality will be replaced by soup/setup scripts Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.map.jinja | 2 +- salt/manager/tools/sbin/so-kafka-clusterid | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index 5d74e9e1c..36f789259 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -1,5 +1,5 @@ {% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} -{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} +{% set STORED_KAFKANODES = salt['pillar.get']('kafka', {}) %} {% set existing_ids = [] %} diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 7ac055997..829e4fc87 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -20,10 +20,10 @@ fi if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls +fi if ! 
grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi +fi \ No newline at end of file From 4ac04a1a4671d0d2ff3e486801ab4b4f50cebcbb Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:46:36 -0400 Subject: [PATCH 420/777] add kafkapass soc annotation Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/soc_kafka.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 8a6c516a9..500ad59c3 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -9,6 +9,10 @@ kafka: sensitive: True helpLink: kafka.html config: + kafkapass: + description: The password to use for the Kafka certificates. + sensitive: True + helpLink: kafka.html server: advertised_x_listeners: description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication. From 6c5e0579cf01a106ff16347e7817b40407e46744 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 19 Apr 2024 09:32:32 -0400 Subject: [PATCH 421/777] logging changes. 
ensure salt master has pillarWatch engine --- salt/salt/engines/master/pillarWatch.py | 35 ++++++++++++------------- salt/salt/files/engines.conf | 16 +++++------ salt/salt/master.sls | 5 ++++ 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index 9f85a07c4..0f6f0ba6a 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -7,13 +7,14 @@ import logging import re -import salt.client - log = logging.getLogger(__name__) -local = salt.client.LocalClient() + +# will need this in future versions of this engine +#import salt.client +#local = salt.client.LocalClient() def start(fpa, interval=10): - log.info("pillarWatch engine: checking watched pillars for changes") + log.info("pillarWatch engine: ##### checking watched pillars for changes #####") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one @@ -41,7 +42,7 @@ def start(fpa, interval=10): # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later patternFound = 0 with open(pillarFile, "r") as file: - log.info("pillarWatch engine: checking file: %s" % pillarFile) + log.debug("pillarWatch engine: checking file: %s" % pillarFile) for line in file: log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) @@ -50,7 +51,7 @@ def start(fpa, interval=10): # for pipeline. 
once pipeline is found, it will record the value if re.search('^' + patterns[patternFound] + ':', line.strip()): # strip the newline because it makes the logs u-g-l-y - log.info("pillarWatch engine: found: %s" % line.strip('\n')) + log.debug("pillarWatch engine: found: %s" % line.strip('\n')) patternFound += 1 # we have found the final key in the pillar that we are looking for, get the previous value then the current value if patternFound == len(patterns): @@ -62,8 +63,8 @@ def start(fpa, interval=10): if pillar in l: previousPillarValue = str(l.split(":")[1].strip()) currentPillarValue = str(line.split(":")[1]).strip() - log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) - log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) # if the pillar we are checking for changes has been defined in the dataFile, # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. 
if pillar in df: @@ -78,9 +79,7 @@ def start(fpa, interval=10): if patternFound == len(patterns): break # if the pillar value changed, then we find what actions we should take - log.info("pillarWatch engine: checking if currentPillarValue != previousPillarValue") - log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) - log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue") if currentPillarValue != previousPillarValue: log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) # check if the previous pillar value is defined in the pillar from -> to actions @@ -108,17 +107,17 @@ def start(fpa, interval=10): # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard else: ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] - log.info("pillarWatch engine: all defined actions: %s" % actions['from']) - log.info("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) + log.debug("pillarWatch engine: all defined actions: %s" % actions['from']) + log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) for action in ACTIONS: log.info("pillarWatch engine: action: %s" % action) if action != 'NO DEFINED ACTION FOR US TO TAKE': for saltModule, args in action.items(): - log.info("pillarWatch engine: saltModule: %s" % saltModule) - log.info("pillarWatch engine: args: %s" % args) - __salt__[saltModule](**args) - #actionReturn = __salt__[saltModule](**args) - #log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + log.debug("pillarWatch engine: saltModule: %s" % saltModule) + 
log.debug("pillarWatch engine: args: %s" % args) + #__salt__[saltModule](**args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 3066f588c..bee9493ee 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -18,19 +18,19 @@ engines: KAFKA: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True - - cmd.run: - cmd: salt-call saltutil.kill_all_jobs - - cmd.run: - cmd: salt-call state.highstate & +# - cmd.run: +# cmd: salt-call saltutil.kill_all_jobs +# - cmd.run: +# cmd: salt-call state.highstate & KAFKA: to: '*': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False - - cmd.run: - cmd: salt-call saltutil.kill_all_jobs - - cmd.run: - cmd: salt-call state.highstate & +# - cmd.run: +# cmd: salt-call saltutil.kill_all_jobs +# - cmd.run: +# cmd: salt-call state.highstate & - files: - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e01..51acca61d 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -27,6 +27,11 @@ checkmine_engine: - source: salt://salt/engines/master/checkmine.py - makedirs: True +pillarWatch_engine: + file.managed: + - name: /etc/salt/engines/pillarWatch.py + - source: salt://salt/engines/master/pillarWatch.py + engines_config: file.managed: - name: /etc/salt/master.d/engines.conf From a237ef5d96568d0293f950c41c1d6347e8a948ad Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 19 Apr 2024 16:33:35 -0400 Subject: [PATCH 422/777] Update default queries --- salt/soc/defaults.yaml | 7 ++----- salt/strelka/compile_yara/compile_yara.py | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) 
diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 6a1376478..a4e7cbf4d 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2128,14 +2128,11 @@ soc: query: "so_detection.isEnabled:false | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity" description: Show all disabled Detections - name: "Detection Type - Suricata (NIDS)" - query: "so_detection.language:suricata | groupby so_detection.ruleset so_detection.isEnabled" + query: "so_detection.language:suricata | groupby so_detection.ruleset so_detection.isEnabled | groupby so_detection.category" description: Show all NIDS Detections, which are run with Suricata - name: "Detection Type - Sigma (Elastalert) - All" - query: "so_detection.language:sigma | groupby so_detection.ruleset so_detection.isEnabled" + query: "so_detection.language:sigma | groupby so_detection.ruleset so_detection.isEnabled | groupby so_detection.category | groupby so_detection.product" description: Show all Sigma Detections, which are run with Elastalert - - name: "Detection Type - Sigma (Elastalert) - Windows" - query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*" | groupby so_detection.ruleset so_detection.isEnabled' - description: Show all Sigma Detections with a logsource of Windows - name: "Detection Type - YARA (Strelka)" query: "so_detection.language:yara | groupby so_detection.ruleset so_detection.isEnabled" description: Show all YARA detections, which are used by Strelka diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index 2138c73d1..6d88fbbde 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -43,6 +43,7 @@ def compile_yara_rules(rules_dir): "event.dataset": "soc.detections", "log.level": "error", "error.message": error_message, + "error.analysis": "syntax error", "detection_type": "yara", "rule.uuid": rule_id, "error.type": 
"runtime_status" From 25d63f751676b9fc708d04b4e4a5f60dc0e392e2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 22 Apr 2024 16:42:59 -0400 Subject: [PATCH 423/777] Setup kafka reactor for managing kafka controllers globally Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/controllers.sls | 20 ++++++++++++++++++++ salt/kafka/enabled.sls | 1 + salt/reactor/kafka.sls | 16 ++++++++++++++++ salt/salt/files/reactor.conf | 3 +++ salt/salt/master.sls | 5 +++++ 5 files changed, 45 insertions(+) create mode 100644 salt/kafka/controllers.sls create mode 100644 salt/reactor/kafka.sls create mode 100644 salt/salt/files/reactor.conf diff --git a/salt/kafka/controllers.sls b/salt/kafka/controllers.sls new file mode 100644 index 000000000..c6df07b0c --- /dev/null +++ b/salt/kafka/controllers.sls @@ -0,0 +1,20 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} + +{% set process_x_roles = salt['pillar.get']('kafka:config:server:process_x_roles', KAFKADEFAULTS.kafka.config.server.process_x_roles, merge=true) %} + +{# Send an event to the salt master at every highstate. Containing the minions process_x_roles. 
+ if no value is set for this minion then the default in kafka/defaults.yaml is used #} +push_event_to_master: + event.send: + - name: kafka/controllers_update + - data: + id: {{ grains['id'] }} + process_x_roles: {{ process_x_roles }} +{% endif %} diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 3c4f548f1..d05a49a0e 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -13,6 +13,7 @@ include: {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} - kafka.nodes {% endif %} + - kafka.controllers - elasticsearch.ca - kafka.sostatus - kafka.config diff --git a/salt/reactor/kafka.sls b/salt/reactor/kafka.sls new file mode 100644 index 000000000..879fb5431 --- /dev/null +++ b/salt/reactor/kafka.sls @@ -0,0 +1,16 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +https://securityonion.net/license; you may not use this file except in compliance with the +Elastic License 2.0. 
#} + +{% set minionid = data['id'].split('_')[0] %} +{% set role = data['data']['process_x_roles'] %} + +{# Run so-yaml to replace kafka.node..role with the value from kafka/controllers.sls #} + +update_global_kafka_pillar: + local.cmd.run: + - tgt: 'G@role:so-manager or G@role:so-managersearch or G@role:so-standalone' + - tgt_type: compound + - arg: + - '/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/nodes.sls kafka.nodes.{{ minionid }}.role {{ role }}' \ No newline at end of file diff --git a/salt/salt/files/reactor.conf b/salt/salt/files/reactor.conf new file mode 100644 index 000000000..129305572 --- /dev/null +++ b/salt/salt/files/reactor.conf @@ -0,0 +1,3 @@ +reactor: + - 'kafka/controllers_update': + - salt://reactor/kafka.sls \ No newline at end of file diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e01..76340fb3d 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -32,6 +32,11 @@ engines_config: - name: /etc/salt/master.d/engines.conf - source: salt://salt/files/engines.conf +reactor_config: + file.managed: + - name: /etc/salt/master.d/reactor.conf + - source: salt://salt/files/reactor.conf + salt_master_service: service.running: - name: salt-master From 5a401af1fddb967a52d1d66d089b6f48746639f7 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 22 Apr 2024 16:44:35 -0400 Subject: [PATCH 424/777] Update kafka process_x_roles annotation Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/soc_kafka.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 500ad59c3..47ff05719 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -92,8 +92,10 @@ kafka: forcedType: int helpLink: kafka.html process_x_roles: - description: The roles the process performs. Use a comma-seperated list is multiple. + description: The roles performed by Kafka node. 
Default is to act as 'broker' only. title: process.roles + regex: ^(broker|controller|broker,controller|controller,broker)$ + regexFailureMessage: Valid values include 'broker' 'controller' or 'broker,controller' helpLink: kafka.html socket_x_receive_x_buffer_x_bytes: description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default. From 58ddd55123e535bee74f25c97f85721769395548 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 23 Apr 2024 07:28:07 -0400 Subject: [PATCH 425/777] Exclude yara runtime log --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index d54c60168..b83c98e7a 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -236,6 +236,7 @@ exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state +exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From 691b02a15e8d6783280675638707a42d967785cf Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 23 Apr 2024 10:40:09 -0400 Subject: [PATCH 426/777] Fix warm description --- salt/elasticsearch/soc_elasticsearch.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 96a757c82..81753f16b 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -131,7 +131,7 @@ elasticsearch: helpLink: elasticsearch.html warm: min_age: - description: Minimum age of index. ex. 
30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. regex: ^\[0-9\]{1,5}d$ forcedType: string global: True From aa0c589361f3719173d264d5aa7856f4ac66fcbd Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 23 Apr 2024 13:51:12 -0400 Subject: [PATCH 427/777] Update kafka managed node pillar template to include its process.role Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/files/managed_node_pillar.jinja | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/kafka/files/managed_node_pillar.jinja b/salt/kafka/files/managed_node_pillar.jinja index fb2ef410e..aa4f7e502 100644 --- a/salt/kafka/files/managed_node_pillar.jinja +++ b/salt/kafka/files/managed_node_pillar.jinja @@ -4,4 +4,7 @@ kafka: {{ node }}: ip: {{ values['ip'] }} nodeid: {{ values['nodeid'] }} +{%- if values['role'] != none %} + role: {{ values['role'] }} +{%- endif %} {% endfor %} \ No newline at end of file From 36573d6005a40ccd7b83fe32087a453f0abb5a48 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 23 Apr 2024 16:45:36 -0400 Subject: [PATCH 428/777] Update kafka cert permissions Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ssl/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 853afb2b3..854628949 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -856,10 +856,10 @@ kafka_logstash_pkcs12_perms: - user: 960 - group: 931 -kafka_pkcs8_perms: +elasticfleet_kafka_pkcs8_perms: file.managed: - replace: False - - name: /etc/pki/kafka.p8 + - name: 
/etc/pki/elasticfleet-kafka.p8 - mode: 640 - user: 960 - group: 939 From e53e7768a070f4f4abf31977df75054fa552eb41 Mon Sep 17 00:00:00 2001 From: Pete Date: Tue, 23 Apr 2024 21:24:39 +0000 Subject: [PATCH 429/777] check status before stopping service resolves #12811 so-verify detects rare false error If salt is uninstalled during call to so-setup where it detects a previous install, the "Failed" keyword from "systemctl stop $service" causes so-verify to falsely detect an installation error. This might happen if the user removes the salt packages between calls to so-setup, or if upgrading from Ubuntu 20.04 to 22.04 then installing 2.4.xx on top of a 2.3.xx installation. The fix is to wrap the call to stop the service in a check if the service is running. This ignores the setting of pid var, as the next use of pid is within a while loop that will not execute for the same reason the systemctl stop call was not launched in the background. --- setup/so-functions | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index d19f27620..b60989fb2 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1603,7 +1603,9 @@ reinstall_init() { # Kill any salt processes (safely) for service in "${salt_services[@]}"; do # Stop the service in the background so we can exit after a certain amount of time - systemctl stop "$service" & + if check_service_status "$service"; then + systemctl stop "$service" & + fi local pid=$! 
local count=0 From 8a0a4357005875d14aef7350a3828f76d2cc5eff Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 24 Apr 2024 08:35:19 -0400 Subject: [PATCH 430/777] Fix warm description --- salt/elasticsearch/soc_elasticsearch.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 81753f16b..81070176c 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -271,7 +271,7 @@ elasticsearch: helpLink: elasticsearch.html warm: min_age: - description: Minimum age of index. This determines when the index should be moved to the hot tier. + description: Minimum age of index. This determines when the index should be moved to the warm tier. global: True advanced: True helpLink: elasticsearch.html From 75b5e16696a46d805b2ad76f10383f422600b517 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 24 Apr 2024 09:14:39 -0400 Subject: [PATCH 431/777] Update description, type, and regex --- salt/elasticsearch/soc_elasticsearch.yaml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 81070176c..dac2614ce 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -100,6 +100,8 @@ elasticsearch: hot: max_age: description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier. + regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True helpLink: elasticsearch.html actions: @@ -121,6 +123,8 @@ elasticsearch: cold: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. 
+ regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True helpLink: elasticsearch.html actions: @@ -145,6 +149,8 @@ elasticsearch: delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. + regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True helpLink: elasticsearch.html so-logs: &indexSettings @@ -271,7 +277,9 @@ elasticsearch: helpLink: elasticsearch.html warm: min_age: - description: Minimum age of index. This determines when the index should be moved to the warm tier. + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. + regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True advanced: True helpLink: elasticsearch.html @@ -296,7 +304,9 @@ elasticsearch: helpLink: elasticsearch.html cold: min_age: - description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True advanced: True helpLink: elasticsearch.html @@ -311,6 +321,8 @@ elasticsearch: delete: min_age: description: Minimum age of index. This determines when the index should be deleted. 
+ regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True advanced: True helpLink: elasticsearch.html From 1b3a0a3de8d57166e3e0228d8562d3269d316bf3 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 24 Apr 2024 10:11:02 -0400 Subject: [PATCH 432/777] Remove hot max_age --- salt/elasticsearch/soc_elasticsearch.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index dac2614ce..7a6885075 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -98,12 +98,6 @@ elasticsearch: policy: phases: hot: - max_age: - description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier. - regex: ^\[0-9\]{1,5}d$ - forcedType: string - global: True - helpLink: elasticsearch.html actions: set_priority: priority: From 0bd0c7b1ec09b86b460cf16d24b5d172bd9beaa4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 24 Apr 2024 13:26:25 -0400 Subject: [PATCH 433/777] allow for mmap-locked to be configured --- salt/suricata/defaults.yaml | 1 + salt/suricata/map.jinja | 1 + salt/suricata/soc_suricata.yaml | 5 +++++ 3 files changed, 7 insertions(+) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 914c045b1..fa863473a 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -30,6 +30,7 @@ suricata: cluster-type: cluster_flow defrag: "yes" use-mmap: "yes" + mmap-locked: "yes" threads: 1 tpacket-v3: "yes" ring-size: 5000 diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index 2a3adf5f1..d9748acee 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -34,6 +34,7 @@ cluster-type: {{ SURICATAMERGED.config['af-packet']['cluster-type'] }} defrag: "{{ SURICATAMERGED.config['af-packet'].defrag }}" use-mmap: "{{ SURICATAMERGED.config['af-packet']['use-mmap'] }}" + mmap-locked: {{ SURICATAMERGED.config['af-packet']['mmap-locked'] }} 
threads: {{ SURICATAMERGED.config['af-packet'].threads }} tpacket-v3: "{{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}" ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }} diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index b0a864329..a1847167c 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -83,6 +83,11 @@ suricata: use-mmap: advanced: True readonly: True + mmap-locked: + description: Prevent swapping by locking the memory map. + advanced: True + regex: ^(yes|no)$ + helpLink: suricata.html threads: description: The amount of worker threads. helpLink: suricata.html From 4b7f826a2a8ca44f5f6eb7d809b50fb09d2ba1c8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 24 Apr 2024 13:29:55 -0400 Subject: [PATCH 434/777] quote is so true becomes yes --- salt/suricata/map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja index d9748acee..a5012317a 100644 --- a/salt/suricata/map.jinja +++ b/salt/suricata/map.jinja @@ -34,7 +34,7 @@ cluster-type: {{ SURICATAMERGED.config['af-packet']['cluster-type'] }} defrag: "{{ SURICATAMERGED.config['af-packet'].defrag }}" use-mmap: "{{ SURICATAMERGED.config['af-packet']['use-mmap'] }}" - mmap-locked: {{ SURICATAMERGED.config['af-packet']['mmap-locked'] }} + mmap-locked: "{{ SURICATAMERGED.config['af-packet']['mmap-locked'] }}" threads: {{ SURICATAMERGED.config['af-packet'].threads }} tpacket-v3: "{{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}" ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }} From 13a6520a8ca3ab753906f837b1d767b4e8b548ea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 24 Apr 2024 13:50:12 -0400 Subject: [PATCH 435/777] mmap-locked default no --- salt/suricata/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index fa863473a..d819d1cf9 100644 --- 
a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -30,7 +30,7 @@ suricata: cluster-type: cluster_flow defrag: "yes" use-mmap: "yes" - mmap-locked: "yes" + mmap-locked: "no" threads: 1 tpacket-v3: "yes" ring-size: 5000 From 59a02635ed9953ffc25f14f0b3f6c2263ebe872f Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 24 Apr 2024 15:18:49 -0400 Subject: [PATCH 436/777] Change index sorting --- .../so-elasticsearch-indices-delete-delete | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete index 07feb36bd..5577fde3f 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete @@ -27,6 +27,7 @@ overlimit() { # 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit. # Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop. + while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do # If we can't query Elasticsearch, then immediately return false. @@ -34,28 +35,35 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do [ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." 
>> ${LOG} && exit # We iterate through the closed and open indices - CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3) + CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | sort -t- -k4) + OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3) + OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | sort -t- -k4) - for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do + for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream # To do so, we need to identify to which data stream this index is associated # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN") - # We look up the data stream, and determine the write index. 
If there is only one backing index, we delete the entire data stream - BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length') - if [ "$BACKING_INDICES" -gt 1 ]; then - CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) - # We make sure we are not trying to delete a write index - if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} - /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 - fi + if [[ "$INDEX" =~ "^so-.*" ]]; then + printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} + /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 else - printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG} + # We look up the data stream, and determine the write index. 
If there is only one backing index, we delete the entire data stream + BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length') + if [ "$BACKING_INDICES" -gt 1 ]; then + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + # We make sure we are not trying to delete a write index + if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} + /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 + fi + else + printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG} /usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1 + fi fi if ! 
overlimit ; then exit From 73b5bb1a75312b0c7a712c0ae1cde30f01fa26fd Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 24 Apr 2024 15:35:17 -0400 Subject: [PATCH 437/777] add memlock to so-suricata container --- salt/docker/defaults.yaml | 2 ++ salt/docker/soc_docker.yaml | 38 ++++++++++++++++++++++++++++++++++++- salt/suricata/enabled.sls | 6 ++++++ 3 files changed, 45 insertions(+), 1 deletion(-) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 2ceaecaa7..b1d3b4e44 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -180,6 +180,8 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] + ulimits: + - memlock=524288000 'so-zeek': final_octet: 99 custom_bind_mounts: [] diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index da078941a..08e0dccc5 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -63,5 +63,41 @@ docker: so-elastic-agent: *dockerOptions so-telegraf: *dockerOptions so-steno: *dockerOptions - so-suricata: *dockerOptions + so-suricata: + final_octet: + description: Last octet of the container IP address. + helpLink: docker.html + readonly: True + advanced: True + global: True + port_bindings: + description: List of port bindings for the container. + helpLink: docker.html + advanced: True + multiline: True + forcedType: "[]string" + custom_bind_mounts: + description: List of custom local volume bindings. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" + extra_hosts: + description: List of additional host entries for the container. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" + extra_env: + description: List of additional ENV entries for the container. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" + ulimits: + description: Ulimits for the container, in bytes. 
+ advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" so-zeek: *dockerOptions diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index d35160527..8520187d0 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -24,6 +24,12 @@ so-suricata: - {{ XTRAENV }} {% endfor %} {% endif %} + {% if DOCKER.containers['so-suricata'].ulimits %} + - ulimits: + {% for ULIMIT in DOCKER.containers['so-suricata'].ulimits %} + - {{ ULIMIT }} + {% endfor %} + {% endif %} - binds: - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro From c9d9979f2250491025d7779731389f8153f4d833 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 24 Apr 2024 16:18:45 -0400 Subject: [PATCH 438/777] allow for enabled/disable of so-elasticsearch-indices-delete cronjob --- salt/elasticsearch/defaults.yaml | 3 ++- salt/elasticsearch/enabled.sls | 13 ++++++++++--- salt/elasticsearch/soc_elasticsearch.yaml | 4 ++++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 09f3bd681..53340340f 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1,4 +1,6 @@ elasticsearch: + enabled: false + index_clean: true config: action: destructive_requires_name: true @@ -54,7 +56,6 @@ elasticsearch: enabled: true key: /usr/share/elasticsearch/config/elasticsearch.key verification_mode: none - enabled: false pipelines: custom001: description: Custom Pipeline diff --git a/salt/elasticsearch/enabled.sls b/salt/elasticsearch/enabled.sls index b9c66f231..1d35d3505 100644 --- a/salt/elasticsearch/enabled.sls +++ b/salt/elasticsearch/enabled.sls @@ -200,9 +200,15 @@ so-elasticsearch-roles-load: - require: - docker_container: so-elasticsearch - file: elasticsearch_sbin_jinja -{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 
'so-manager'] %} + +{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %} +{% if ELASTICSEARCHMERGED.index_clean %} +{% set ap = "present" %} +{% else %} +{% set ap = "absent" %} +{% endif %} so-elasticsearch-indices-delete: - cron.present: + cron.{{ap}}: - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1 - identifier: so-elasticsearch-indices-delete - user: root @@ -211,7 +217,8 @@ so-elasticsearch-indices-delete: - daymonth: '*' - month: '*' - dayweek: '*' -{% endif %} +{% endif %} + {% endif %} {% else %} diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 7a6885075..92c759288 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -5,6 +5,10 @@ elasticsearch: esheap: description: Specify the memory heap size in (m)egabytes for Elasticsearch. helpLink: elasticsearch.html + index_clean: + description: Enable or disable so-elasticsearch-indices-delete cron job. 
+ forcedType: bool + helpLink: elasticsearch.html retention: retention_pct: decription: Total percentage of space used by Elasticsearch for multi node clusters From 3c3ed8b5c516af5a775cdb332055130fd3c92ba5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 24 Apr 2024 16:33:47 -0400 Subject: [PATCH 439/777] Add runtime status logs --- .../soc-detections-logs.json | 35 +++++++++++++++++++ salt/soc/config.sls | 11 ++++++ salt/soc/defaults.yaml | 6 ++++ .../tools/sbin/so-detections-runtime-status | 33 +++++++++++++++++ salt/strelka/compile_yara/compile_yara.py | 14 ++++---- 5 files changed, 92 insertions(+), 7 deletions(-) create mode 100644 salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json create mode 100644 salt/soc/tools/sbin/so-detections-runtime-status diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json new file mode 100644 index 000000000..5649b481d --- /dev/null +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json @@ -0,0 +1,35 @@ +{ + "policy_id": "so-grid-nodes_general", + "package": { + "name": "log", + "version": "" + }, + "name": "soc-detections-logs", + "description": "Security Onion Console - Detections Logs", + "namespace": "so", + "inputs": { + "logs-logfile": { + "enabled": true, + "streams": { + "log.logs": { + "enabled": true, + "vars": { + "paths": [ + "/opt/so/log/soc/detections_runtime-status_sigma.log", + "/opt/so/log/soc/detections_runtime-status_yara.log" + ], + "exclude_files": [], + "ignore_older": "72h", + "data_stream.dataset": "soc", + "tags": [ + "so-soc" + ], + "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: 
\"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", + "custom": "pipeline: common" + } + } + } + } + }, + "force": true +} diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 3e756f977..af34f5e7c 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -80,6 +80,17 @@ socmotd: - mode: 600 - template: jinja +crondetectionsruntime: + cron.present: + - name: /usr/local/bin/so-detections-runtime-status cron + - identifier: detections-runtime-status + - user: socore + - minute: '*/10' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + socsigmafinalpipeline: file.managed: - name: /opt/so/conf/soc/sigma_final_pipeline.yaml diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 13e2021c5..1c14e61cb 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1912,6 +1912,12 @@ soc: - name: Kismet - WiFi Devices description: WiFi devices seen by Kismet sensors query: 'event.module: kismet | groupby network.wireless.ssid | groupby device.manufacturer | groupby -pie device.manufacturer | groupby event.dataset' + - name: SOC Detections - Runtime Status + description: Runtime Status of Detections + query: 'event.dataset:soc.detections | groupby soc.detection_type soc.error_type | groupby soc.error_analysis | groupby soc.rule.name | groupby soc.error_message' + + + job: alerts: advanced: false diff --git a/salt/soc/tools/sbin/so-detections-runtime-status b/salt/soc/tools/sbin/so-detections-runtime-status new file mode 100644 index 000000000..ed3ee5800 --- /dev/null +++ b/salt/soc/tools/sbin/so-detections-runtime-status @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under 
one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# Set the default output destination to stdout +output_dest="/dev/stdout" + +# If the "cron" flag is passed, change the output destination to the log file +if [ "$1" = "cron" ]; then + output_dest="/opt/so/log/soc/detections_runtime-status_sigma.log" +fi + +# Run the query and output based on the output_dest value +/sbin/so-elasticsearch-query '*:elastalert_error*/_search' -d '{"query":{"range":{"@timestamp":{"gte":"now-11m","lte":"now"}}},"size": 50}' | \ +jq --compact-output '.hits.hits[] | { + _timestamp: ._source["@timestamp"], + "rule.name": ._source.data.rule, + error_type: "runtime_status", + error_message: ._source.message, + detection_type: "sigma", + event_module: "soc", + event_dataset: "soc.detections", + error_analysis: ( + if ._source.message | contains("Unknown column [winlog.channel]") then "Target logsource never seen" + elif ._source.message | contains("parsing_exception") then "Syntax Error" + else "Unknown" + end + ) + }' >> $output_dest + diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index 6d88fbbde..ece3c6a9e 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -39,14 +39,14 @@ def compile_yara_rules(rules_dir): # Extract just the UUID from the rule file name rule_id = os.path.splitext(os.path.basename(rule_file))[0] log_entry = { - "event.module": "soc", - "event.dataset": "soc.detections", + "event_module": "soc", + "event_dataset": "soc.detections", "log.level": "error", - "error.message": error_message, - "error.analysis": "syntax error", - "detection_type": "yara", - "rule.uuid": rule_id, - "error.type": "runtime_status" + "error_message": error_message, + "error_analysis": "Syntax Error", + "detection_type": "YARA", + 
"rule_uuid": rule_id, + "error_type": "runtime_status" } with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file: json.dump(log_entry, log_file) From ab832e4bb2fc6341c9c61c4949a0542b651aef10 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 24 Apr 2024 17:17:53 -0400 Subject: [PATCH 440/777] Include logstash-prefixed indices --- .../tools/sbin_jinja/so-elasticsearch-indices-delete-delete | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete index 5577fde3f..44f27e9d4 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete @@ -46,7 +46,7 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN") - if [[ "$INDEX" =~ "^so-.*" ]]; then + if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 else From 44afa55274233c3c656691a35e9a4511edf9da93 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 24 Apr 2024 17:41:37 -0400 Subject: [PATCH 441/777] Fix comments about deletion --- .../sbin_jinja/so-elasticsearch-indices-delete-delete | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete index 44f27e9d4..5e97a3f19 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete +++ 
b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete @@ -41,15 +41,16 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | sort -t- -k4) for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do - # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream - # To do so, we need to identify to which data stream this index is associated - # We extract the data stream name using the pattern below - DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" - DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN") + # Check if index is an older index. If it is an older index, delete it before moving on to newer indices. if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG} /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1 else + # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream + # To do so, we need to identify to which data stream this index is associated + # We extract the data stream name using the pattern below + DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" + DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN") # We look up the data stream, and determine the write index. 
If there is only one backing index, we delete the entire data stream BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length') if [ "$BACKING_INDICES" -gt 1 ]; then From d50de804a8c756e79c9fef6b4cd8094e41c2df6f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 25 Apr 2024 09:04:34 -0400 Subject: [PATCH 442/777] update annotation --- salt/elasticsearch/soc_elasticsearch.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 92c759288..210697bba 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -6,7 +6,7 @@ elasticsearch: description: Specify the memory heap size in (m)egabytes for Elasticsearch. helpLink: elasticsearch.html index_clean: - description: Enable or disable so-elasticsearch-indices-delete cron job. + description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings. 
forcedType: bool helpLink: elasticsearch.html retention: From b42442629844df877b1105abbcb5109b76076ff7 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 25 Apr 2024 09:14:18 -0400 Subject: [PATCH 443/777] Exclude suricata --- .../tools/sbin_jinja/so-elasticsearch-indices-delete-delete | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete index 5e97a3f19..a00437a25 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete @@ -36,9 +36,9 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do # We iterate through the closed and open indices CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3) - CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | sort -t- -k4) + CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4) OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3) - OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | sort -t- -k4) + OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4) for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do # Check if index 
is an older index. If it is an older index, delete it before moving on to newer indices. From 2c7eb3c755ef475e082a59645ba2e98c3abcddd0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 25 Apr 2024 10:05:59 -0400 Subject: [PATCH 444/777] only apply ulimits to suricata container if user enable mmap-locked --- salt/suricata/enabled.sls | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index 8520187d0..3e015d100 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -7,6 +7,7 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} +{% from 'suricata/map.jinja' import SURICATAMERGED %} include: @@ -24,7 +25,8 @@ so-suricata: - {{ XTRAENV }} {% endfor %} {% endif %} - {% if DOCKER.containers['so-suricata'].ulimits %} + {# we look at SURICATAMERGED.config['af-packet'][0] since we only allow one interface and therefore always the first list item #} + {% if SURICATAMERGED.config['af-packet'][0]['mmap-locked'] == "yes" and DOCKER.containers['so-suricata'].ulimits %} - ulimits: {% for ULIMIT in DOCKER.containers['so-suricata'].ulimits %} - {{ ULIMIT }} From 89cb8b79fdad0d1fdcb46c09bb4f6cd322cdeed7 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 29 Apr 2024 08:07:19 -0400 Subject: [PATCH 445/777] restrict workflows to so --- .github/workflows/close-threads.yml | 1 + .github/workflows/lock-threads.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/close-threads.yml b/.github/workflows/close-threads.yml index 059a35a9f..41b267f2b 100644 --- a/.github/workflows/close-threads.yml +++ b/.github/workflows/close-threads.yml @@ -15,6 +15,7 @@ concurrency: jobs: close-threads: + if: github.repository_owner == 'security-onion-solutions' runs-on: ubuntu-latest permissions: issues: write diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml 
index eeaa444ed..f5d46ed46 100644 --- a/.github/workflows/lock-threads.yml +++ b/.github/workflows/lock-threads.yml @@ -15,6 +15,7 @@ concurrency: jobs: lock-threads: + if: github.repository_owner == 'security-onion-solutions' runs-on: ubuntu-latest steps: - uses: jertel/lock-threads@main From f2c3c928fc4753fba8888110f2470fd5c7e40bea Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 29 Apr 2024 08:49:05 -0400 Subject: [PATCH 446/777] Sigma pivot fix and cleanup --- .../files/modules/so/playbook-es.py | 38 ----------- .../files/modules/so/securityonion-es.py | 63 +++++++++++++++++++ salt/soc/defaults.yaml | 1 + 3 files changed, 64 insertions(+), 38 deletions(-) delete mode 100644 salt/elastalert/files/modules/so/playbook-es.py create mode 100644 salt/elastalert/files/modules/so/securityonion-es.py diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py deleted file mode 100644 index 3a43c26c1..000000000 --- a/salt/elastalert/files/modules/so/playbook-es.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- - -from time import gmtime, strftime -import requests,json -from elastalert.alerts import Alerter - -import urllib3 -urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - -class PlaybookESAlerter(Alerter): - """ - Use matched data to create alerts in elasticsearch - """ - - required_options = set(['play_title','play_url','sigma_level']) - - def alert(self, matches): - for match in matches: - today = strftime("%Y.%m.%d", gmtime()) - timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime()) - headers = {"Content-Type": "application/json"} - - creds = None - if 'es_username' in self.rule and 'es_password' in self.rule: - creds = (self.rule['es_username'], self.rule['es_password']) - - payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp} - url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/" - requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds) - - def get_info(self): - return {'type': 'PlaybookESAlerter'} diff --git a/salt/elastalert/files/modules/so/securityonion-es.py b/salt/elastalert/files/modules/so/securityonion-es.py new file mode 100644 index 000000000..0a82bdce6 --- /dev/null +++ b/salt/elastalert/files/modules/so/securityonion-es.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + + +from time import gmtime, strftime +import requests,json +from elastalert.alerts import Alerter + +import urllib3 +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class SecurityOnionESAlerter(Alerter): + """ + Use matched data to create alerts in Elasticsearch. + """ + + required_options = set(['detection_title', 'sigma_level']) + optional_fields = ['sigma_category', 'sigma_product', 'sigma_service'] + + def alert(self, matches): + for match in matches: + timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime()) + headers = {"Content-Type": "application/json"} + + creds = None + if 'es_username' in self.rule and 'es_password' in self.rule: + creds = (self.rule['es_username'], self.rule['es_password']) + + # Start building the rule dict + rule_info = { + "name": self.rule['detection_title'], + "uuid": self.rule['detection_public_id'] + } + + # Add optional fields if they are present in the rule + for field in self.optional_fields: + rule_key = field.split('_')[-1] # Assumes field format "sigma_" + if field in self.rule: + rule_info[rule_key] = self.rule[field] + + # Construct the payload with the conditional rule_info + payload = { + "tags": "alert", + "rule": rule_info, + "event": { + "severity": self.rule['event.severity'], + "module": self.rule['event.module'], + "dataset": self.rule['event.dataset'], + "severity_label": self.rule['sigma_level'] + }, + "sigma_level": self.rule['sigma_level'], + "event_data": match, + "@timestamp": timestamp + } + url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/" + requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds) + + def get_info(self): + return {'type': 'SecurityOnionESAlerter'} \ No newline at end of file diff --git a/salt/soc/defaults.yaml 
b/salt/soc/defaults.yaml index 1c14e61cb..9be17bcca 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1961,6 +1961,7 @@ soc: - rule.name - event.severity_label - event_data.event.dataset + - rule.category - event_data.source.ip - event_data.source.port - event_data.destination.host From 29c964cca12cbaa1e2cb5e2e0a5b066c9953ccab Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 09:04:52 -0400 Subject: [PATCH 447/777] Set kafka.nodes state to run first to populate kafka.nodes pillar Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/init.sls | 4 ++++ salt/kafka/nodes.sls | 10 ++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index acedba3c3..c4351ebfc 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -7,6 +7,10 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} include: +{# Run kafka/nodes.sls before Kafka is enabled, so kafka nodes pillar is setup #} +{% if grains.role in ['so-manager','so-managersearch', 'so-standalone'] %} + - kafka.nodes +{% endif %} {% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %} - kafka.enabled {% else %} diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls index 5085c6cca..edc5f0701 100644 --- a/salt/kafka/nodes.sls +++ b/salt/kafka/nodes.sls @@ -2,11 +2,10 @@ # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% if GLOBALS.pipeline == "KAFKA" %} -{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} -{# Store kafka pillar in a file rather than memory where values could be lost. 
Kafka does not support nodeid's changing #} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} + +{# Write Kafka pillar, so all grid members have access to nodeid of other kafka nodes and their roles #} write_kafka_pillar_yaml: file.managed: - name: /opt/so/saltstack/local/pillar/kafka/nodes.sls @@ -15,5 +14,4 @@ write_kafka_pillar_yaml: - source: salt://kafka/files/managed_node_pillar.jinja - template: jinja - context: - COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} -{% endif %} \ No newline at end of file + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} \ No newline at end of file From 086ebe1a7c0d3b274e5824d4a352e50904a69b1e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 09:08:14 -0400 Subject: [PATCH 448/777] Split kafka defaults between broker / controller Setup config.map.jinja to update broker / controller / combined node types Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 66 +++++++++++++++++++++++++ salt/kafka/defaults.yaml | 21 ++++++-- salt/kafka/enabled.sls | 5 +- salt/kafka/etc/server.properties.jinja | 4 +- salt/kafka/map.jinja | 16 ++---- salt/kafka/soc_kafka.yaml | 67 +++++++++++++++++--------- 6 files changed, 134 insertions(+), 45 deletions(-) create mode 100644 salt/kafka/config.map.jinja diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja new file mode 100644 index 000000000..8f116c02e --- /dev/null +++ b/salt/kafka/config.map.jinja @@ -0,0 +1,66 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. 
#} +{% from 'kafka/map.jinja' import KAFKAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} + +{# Create list of KRaft controllers #} +{% set controllers = [] %} +{% for node, values in KAFKA_NODES_PILLAR.items() %} +{% if 'controller' in values['role'] %} +{% do controllers.append(values.nodeid ~ "@" ~ node ~ ":9093") %} +{% endif %} +{% endfor %} + +{% set kafka_controller_quorum_voters = ','.join(controllers) %} + +{# By default all Kafka eligible nodes are given the role of broker, except for + grid MANAGER (broker,controller) until overridden through SOC UI #} +{% set node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %} + +{# Generate server.properties for 'broker' , 'controller', 'broker,controller' node types + anything above this line is a configuration needed for ALL Kafka nodes #} +{% if node_type == 'broker' %} +{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %} +{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} +{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} + +{% endif %} + +{% if node_type == 'controller' %} +{% do KAFKAMERGED.config.controller.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} +{% do KAFKAMERGED.config.controller.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} + +{% endif %} + +{# Kafka nodes of this type are not recommended for use outside of development / testing. 
#} +{% if node_type == 'broker,controller' %} +{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %} +{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} +{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} +{% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %} + +{% do KAFKAMERGED.config.broker.update({ + 'listeners': + KAFKAMERGED.config.broker.listeners + + ',' + + KAFKAMERGED.config.controller.listeners }) + %} + +{% do KAFKAMERGED.config.broker.update({ + 'listener_x_security_x_protocol_x_map': + KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + + ',' + + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) + %} + +{% endif %} + +{% if 'broker' in node_type %} +{% set KAFKACONFIG = KAFKAMERGED.config.broker %} +{% else %} +{% set KAFKACONFIG = KAFKAMERGED.config.controller %} +{% endif %} \ No newline at end of file diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 91f55a07d..8dcd70b98 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -1,14 +1,16 @@ kafka: enabled: False + cluster_id: + kafka_pass: + kafka_controllers: [] config: - server: + broker: advertised_x_listeners: auto_x_create_x_topics_x_enable: true - controller_x_listener_x_names: CONTROLLER controller_x_quorum_x_voters: inter_x_broker_x_listener_x_name: BROKER - listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 - listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL + listeners: BROKER://0.0.0.0:9092 + listener_x_security_x_protocol_x_map: BROKER:SSL log_x_dirs: /nsm/kafka/data log_x_retention_x_check_x_interval_x_ms: 300000 log_x_retention_x_hours: 168 @@ -37,3 +39,14 @@ kafka: ssl_x_keystore_x_location: /etc/pki/kafka.jks ssl_x_keystore_x_type: JKS ssl_x_keystore_x_password: changeit + 
controller: + controller_x_listener_x_names: CONTROLLER + controller_x_quorum_x_voters: + listeners: CONTROLLER://0.0.0.0:9093 + listener_x_security_x_protocol_x_map: CONTROLLER:SSL + log_x_dirs: /nsm/kafka/data + log_x_retention_x_check_x_interval_x_ms: 300000 + log_x_retention_x_hours: 168 + log_x_segment_x_bytes: 1073741824 + node_x_id: + process_x_roles: controller \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index d05a49a0e..ec2dc8e46 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -7,12 +7,9 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES as KAFKANODES %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} include: - {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} - - kafka.nodes - {% endif %} - kafka.controllers - elasticsearch.ca - kafka.sostatus diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index df5632ba9..fb0c785cf 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -3,5 +3,5 @@ https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} -{% from 'kafka/map.jinja' import KAFKAMERGED -%} -{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }} +{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} +{{ KAFKACONFIG | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index 56f85144a..996e5dedf 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -3,18 +3,8 @@ https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} +{# This is only used to determine if Kafka is enabled / disabled. 
Configuration is found in kafka/config.map.jinja #} +{# kafka/config.map.jinja depends on there being a kafka nodes pillar being populated #} + {% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} - -{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %} -{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %} - -{% set combined = [] %} -{% for hostname, data in COMBINED_KAFKANODES.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - -{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 47ff05719..2216aaaa7 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -8,12 +8,15 @@ kafka: advanced: True sensitive: True helpLink: kafka.html + kafkapass: + description: The password to use for the Kafka certificates. + sensitive: True + helpLink: kafka.html + kafka_controllers: + description: A list of Security Onion grid members that should act as KRaft controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. All other nodes will default to broker roles. + helpLink: kafka.html config: - kafkapass: - description: The password to use for the Kafka certificates. - sensitive: True - helpLink: kafka.html - server: + broker: advertised_x_listeners: description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication. 
title: advertised.listeners @@ -23,14 +26,6 @@ kafka: title: auto.create.topics.enable forcedType: bool helpLink: kafka.html - controller_x_listener_x_names: - description: Set listeners used by the controller in a comma-seperated list. - title: controller.listener.names - helpLink: kafka.html - controller_x_quorum_x_voters: - description: A comma-seperated list of ID and endpoint information mapped for a set of voters. - title: controller.quorum.voters - helpLink: kafka.html inter_x_broker_x_listener_x_name: description: The name of the listener used for inter-broker communication. title: inter.broker.listener.name @@ -60,12 +55,6 @@ kafka: title: log.segment.bytes forcedType: int helpLink: kafka.html - node_x_id: - description: The node ID corresponds to the roles performed by this process whenever process.roles is populated. - title: node.id - forcedType: int - readonly: True - helpLink: kafka.html num_x_io_x_threads: description: The number of threads used by Kafka. title: num.io.threads @@ -92,10 +81,9 @@ kafka: forcedType: int helpLink: kafka.html process_x_roles: - description: The roles performed by Kafka node. Default is to act as 'broker' only. + description: The role performed by Kafka brokers. title: process.roles - regex: ^(broker|controller|broker,controller|controller,broker)$ - regexFailureMessage: Valid values include 'broker' 'controller' or 'broker,controller' + readonly: True helpLink: kafka.html socket_x_receive_x_buffer_x_bytes: description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default. @@ -174,3 +162,38 @@ kafka: title: ssl.truststore.password sensitive: True helpLink: kafka.html + controller: + controller_x_listener_x_names: + description: Set listeners used by the controller in a comma-seperated list. + title: controller.listener.names + helpLink: kafka.html + listeners: + description: Set of URIs that is listened on and the listener names in a comma-seperated list. 
+ helpLink: kafka.html + listener_x_security_x_protocol_x_map: + description: Comma-seperated mapping of listener name and security protocols. + title: listener.security.protocol.map + helpLink: kafka.html + log_x_dirs: + description: Where Kafka logs are stored within the Docker container. + title: log.dirs + helpLink: kafka.html + log_x_retention_x_check_x_interval_x_ms: + description: Frequency at which log files are checked if they are qualified for deletion. + title: log.retention.check.interval.ms + helpLink: kafka.html + log_x_retention_x_hours: + description: How long, in hours, a log file is kept. + title: log.retention.hours + forcedType: int + helpLink: kafka.html + log_x_segment_x_bytes: + description: The maximum allowable size for a log file. + title: log.segment.bytes + forcedType: int + helpLink: kafka.html + process_x_roles: + description: The role performed by KRaft controller node. + title: process.roles + readonly: True + helpLink: kafka.html \ No newline at end of file From 529c8d7cf21bae87fd6d81d12327baed0ad600af Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:35:46 -0400 Subject: [PATCH 449/777] Remove salt reactor for Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/controllers.sls | 20 -------------------- salt/salt/files/reactor.conf | 3 --- salt/salt/master.sls | 5 ----- 3 files changed, 28 deletions(-) delete mode 100644 salt/kafka/controllers.sls delete mode 100644 salt/salt/files/reactor.conf diff --git a/salt/kafka/controllers.sls b/salt/kafka/controllers.sls deleted file mode 100644 index c6df07b0c..000000000 --- a/salt/kafka/controllers.sls +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} - -{% set process_x_roles = salt['pillar.get']('kafka:config:server:process_x_roles', KAFKADEFAULTS.kafka.config.server.process_x_roles, merge=true) %} - -{# Send an event to the salt master at every highstate. Containing the minions process_x_roles. - if no value is set for this minion then the default in kafka/defaults.yaml is used #} -push_event_to_master: - event.send: - - name: kafka/controllers_update - - data: - id: {{ grains['id'] }} - process_x_roles: {{ process_x_roles }} -{% endif %} diff --git a/salt/salt/files/reactor.conf b/salt/salt/files/reactor.conf deleted file mode 100644 index 129305572..000000000 --- a/salt/salt/files/reactor.conf +++ /dev/null @@ -1,3 +0,0 @@ -reactor: - - 'kafka/controllers_update': - - salt://reactor/kafka.sls \ No newline at end of file diff --git a/salt/salt/master.sls b/salt/salt/master.sls index d28f0b1bd..51acca61d 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -37,11 +37,6 @@ engines_config: - name: /etc/salt/master.d/engines.conf - source: salt://salt/files/engines.conf -reactor_config: - file.managed: - - name: /etc/salt/master.d/reactor.conf - - source: salt://salt/files/reactor.conf - salt_master_service: service.running: - name: salt-master From fd9a91420db18527e82940b6fbceb821a94d09a2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:37:24 -0400 Subject: [PATCH 450/777] Use SOC UI to configure list of KRaft (Kafka) controllers for cluster Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 32 ++++++++++++++++------------ salt/kafka/enabled.sls | 1 - 
salt/kafka/nodes.map.jinja | 42 +++++++++++++++++++++++++------------ 3 files changed, 48 insertions(+), 27 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index 8f116c02e..c9f3e79e2 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -7,13 +7,24 @@ {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} +{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} + {# Create list of KRaft controllers #} {% set controllers = [] %} -{% for node, values in KAFKA_NODES_PILLAR.items() %} -{% if 'controller' in values['role'] %} -{% do controllers.append(values.nodeid ~ "@" ~ node ~ ":9093") %} -{% endif %} -{% endfor %} + +{% if KAFKA_CONTROLLERS_PILLAR != none %} +{% for node in KAFKA_CONTROLLERS_PILLAR %} +{# Check that the user input for kafka_controllers pillar exists as a kafka:node value #} +{% if node in KAFKA_NODES_PILLAR %} +{% do controllers.append(KAFKA_NODES_PILLAR[node]['nodeid'] ~ '@' ~ node ~ ':9093') %} +{% endif %} +{% endfor %} +{% endif %} +{# Ensure in the event that the SOC controllers pillar has a single hostname and that hostname doesn't exist in kafka:nodes + that a controller is still set for the Kafka cluster. 
Defaulting to the grid manager #} +{% if controllers | length < 1 %} +{% do controllers.append(KAFKA_NODES_PILLAR[GLOBALS.manager]['nodeid'] ~ "@" ~ GLOBALS.manager ~ ":9093") %} +{% endif %} {% set kafka_controller_quorum_voters = ','.join(controllers) %} @@ -44,17 +55,12 @@ {% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %} {% do KAFKAMERGED.config.broker.update({ - 'listeners': - KAFKAMERGED.config.broker.listeners - + ',' - + KAFKAMERGED.config.controller.listeners }) + 'listeners': KAFKAMERGED.config.broker.listeners + ',' + KAFKAMERGED.config.controller.listeners }) %} {% do KAFKAMERGED.config.broker.update({ - 'listener_x_security_x_protocol_x_map': - KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map - + ',' - + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) + 'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) %} {% endif %} diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index ec2dc8e46..78e0d87d9 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -10,7 +10,6 @@ {% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} include: - - kafka.controllers - elasticsearch.ca - kafka.sostatus - kafka.config diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index 36f789259..9b4979e92 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -1,12 +1,27 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} -{% set STORED_KAFKANODES = salt['pillar.get']('kafka', {}) %} +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + +{# USED TO GENERATE PILLAR/KAFKA/NODES.SLS. #} +{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %} + +{% set current_kafkanodes = salt.saltutil.runner( + 'mine.get', + tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', + fun='network.ip_addrs', + tgt_type='compound') %} + +{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %} {% set existing_ids = [] %} {# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #} -{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} {% if STORED_KAFKANODES != none %} -{% for node, values in STORED_KAFKANODES.nodes.items() %} +{% for node, values in STORED_KAFKANODES.items() %} {% if values.get('nodeid') %} {% do existing_ids.append(values['nodeid']) %} {% endif %} @@ -16,7 +31,6 @@ {# Create list of possible node ids #} {% set all_possible_ids = range(1, 65536)|list %} -{# Don't like the below loop because the higher the range for all_possible_ids the more time spent on loop #} {# Create list of available node ids by looping through all_possible_ids and ensuring it isn't in existing_ids #} {% set available_ids = [] %} {% for id in all_possible_ids %} @@ -29,14 +43,17 @@ {% set NEW_KAFKANODES = {} %} {% for minionid, ip in current_kafkanodes.items() %} {% set hostname = minionid.split('_')[0] %} -{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} #} -{% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.nodes %} -{% set new_id = available_ids.pop(0) %} -{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 
'ip': ip[0]}}) %}
+{# Override the default process_x_roles for manager and set to 'broker,controller'. Changes from SOC UI will overwrite this #}
+{% if hostname == GLOBALS.manager %}
+{% set process_x_roles = 'broker,controller' %}
 {% endif %}
-{% if hostname not in NEW_KAFKANODES %}
+{% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES %}
 {% set new_id = available_ids.pop(0) %}
-{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %}
+{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %}
+{% endif %}
+{% if hostname not in NEW_KAFKANODES %}
+{% set new_id = available_ids.pop(0) %}
+{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %}
 {% endif %}
 {% endfor %}
@@ -45,9 +62,8 @@
 {% for node, details in NEW_KAFKANODES.items() %}
 {% do COMBINED_KAFKANODES.update({node: details}) %}
 {% endfor %}
-{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #}
 {% if STORED_KAFKANODES != none %}
-{% for node, details in STORED_KAFKANODES.nodes.items() %}
+{% for node, details in STORED_KAFKANODES.items() %}
 {% do COMBINED_KAFKANODES.update({node: details}) %}
 {% endfor %}
 {% endif %}

From 11055b1d32d0b5a5b7f569d00f1c33eed6cf0f23 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Mon, 29 Apr 2024 14:09:09 -0400
Subject: [PATCH 451/777] Rename kafkapass -> kafka_pass
 Run so-kafka-clusterid within nodes.sls state so switchover is consistent

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/nodes.sls                       | 10 +++++++++-
 salt/kafka/soc_kafka.yaml                  |  2 +-
 salt/kafka/storage.sls                     | 10 +---------
 salt/manager/tools/sbin/so-kafka-clusterid | 10 +++++-----
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls
index edc5f0701..7cafb10bc 100644
--- a/salt/kafka/nodes.sls
+++ b/salt/kafka/nodes.sls
@@ -4,6 +4,7 @@ 
# Elastic License 2.0. {% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} +{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %} {# Write Kafka pillar, so all grid members have access to nodeid of other kafka nodes and their roles #} write_kafka_pillar_yaml: @@ -14,4 +15,11 @@ write_kafka_pillar_yaml: - source: salt://kafka/files/managed_node_pillar.jinja - template: jinja - context: - COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} \ No newline at end of file + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} + + +{% if kafka_cluster_id is none %} +generate_kafka_cluster_id: + cmd.run: + - name: /usr/sbin/so-kafka-clusterid +{% endif %} \ No newline at end of file diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 2216aaaa7..505469d6b 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -8,7 +8,7 @@ kafka: advanced: True sensitive: True helpLink: kafka.html - kafkapass: + kafka_pass: description: The password to use for the Kafka certificates. sensitive: True helpLink: kafka.html diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index fbb7c7328..507c199c6 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -6,15 +6,7 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %} - -{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} -{% if kafka_cluster_id is none %} -generate_kafka_cluster_id: - cmd.run: - - name: /usr/sbin/so-kafka-clusterid -{% endif %} -{% endif %} +{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id') %} {# Initialize kafka storage if it doesn't already exist. 
Just looking for meta.properties in /nsm/kafka/data #}
 {% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid
index 829e4fc87..eb0701b8b 100644
--- a/salt/manager/tools/sbin/so-kafka-clusterid
+++ b/salt/manager/tools/sbin/so-kafka-clusterid
@@ -18,12 +18,12 @@ else
 source $(dirname $0)/../../../common/tools/sbin/so-common
 fi
-if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
+if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
 kafka_cluster_id=$(get_random_value 22)
- echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
+ echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
 fi
-if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
+if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
 kafkapass=$(get_random_value)
- echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
-fi
\ No newline at end of file
+ echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
+fi
\ No newline at end of file

From a663bf63c61a5f51c53b7116a067a3fa94bc03de Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Mon, 29 Apr 2024 14:22:04 -0400
Subject: [PATCH 452/777] set Suricata as default pcap engine for eval

---
 setup/so-functions | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/setup/so-functions b/setup/so-functions
index e49d0dbea..60908e0d4 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1330,6 +1330,10 @@ create_global() {
 echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file
 echo " registry_host: '$HOSTNAME'" >> $global_pillar_file
 echo " endgamehost: '$ENDGAMEHOST'" >> $global_pillar_file
+
+ if [ "$install_type" = 'EVAL' ]; then
+ echo " pcapengine: SURICATA" >> $global_pillar_file
+ fi
 }
 create_sensoroni_pillar() {

From 
529bc01d6934acc31594779db68fe0b56913024f Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:53:52 -0400 Subject: [PATCH 453/777] Add missing configuration for nodes running Kafka broker role only Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index c9f3e79e2..4e82eac42 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -39,6 +39,12 @@ {% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} {% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} +{# Nodes with only the 'broker' role need to have the below settings for communicating with controller nodes #} +{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %} +{% do KAFKAMERGED.config.broker.update({ + 'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) + %} {% endif %} {% if node_type == 'controller' %} @@ -50,6 +56,7 @@ {# Kafka nodes of this type are not recommended for use outside of development / testing. 
#} {% if node_type == 'broker,controller' %} {% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %} +{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %} {% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} {% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} {% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %} From a6e8b25969c01439339f16a3f4f2d88ba7d4cb55 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:48:57 -0400 Subject: [PATCH 454/777] Add Kafka connectivity between manager - > receiver nodes. Add connectivity to Kafka between other node types that may need to publish to Kafka. Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/firewall/defaults.yaml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 0b6d06eda..6dd3fead3 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -405,7 +405,6 @@ firewall: - docker_registry - influxdb - sensoroni - - kafka searchnode: portgroups: - redis @@ -433,6 +432,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka receiver: portgroups: - yum @@ -442,6 +442,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka analyst: portgroups: - nginx @@ -566,6 +567,7 @@ firewall: - elastic_agent_update - localrules - sensoroni + - kafka fleet: portgroups: - elasticsearch_rest @@ -613,6 +615,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka heavynode: portgroups: - redis @@ -625,6 +628,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - 
kafka receiver: portgroups: - yum @@ -761,7 +765,7 @@ firewall: - beats_5044 - beats_5644 - beats_5056 - - redis + - kafka - elasticsearch_node - elastic_agent_control - elastic_agent_data @@ -813,6 +817,7 @@ firewall: - redis - elasticsearch_rest - elasticsearch_node + - kafka heavynode: portgroups: - docker_registry @@ -822,6 +827,7 @@ firewall: - redis - elasticsearch_rest - elasticsearch_node + - kafka receiver: portgroups: - yum @@ -1289,6 +1295,9 @@ firewall: - redis - beats_5644 - kafka + manager: + portgroups: + - kafka managersearch: portgroups: - redis From 192d91565dc1f4b7a1ebcb1597026485a9361583 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:34:29 -0400 Subject: [PATCH 455/777] Update final pipeline timestamp format for event.module system events Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 89216077a..524d56071 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -80,7 +80,7 @@ { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } }, - { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, + { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX"] } }, { "community_id":{ 
"if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, From fadb6e2aa9cae3278f384b6e16016513f928e9fa Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:57:48 -0400 Subject: [PATCH 456/777] Re-add original timestamp format + ignore failures with this processor Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 524d56071..c3e70ec2c 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -80,7 +80,7 @@ { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } }, - { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX"] } }, + { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": 
"ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, From ddf662bdb434142a6631572f69825db5c5974a37 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Mon, 29 Apr 2024 16:22:30 -0600 Subject: [PATCH 457/777] Mark Repos as Community Indicate that detection rules pulled from configured repos should be marked as Community rules. --- salt/soc/defaults.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 9be17bcca..051d35541 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -580,7 +580,7 @@ soc: - file.source - file.mime_type - log.id.fuid - - event.dataset + - event.dataset ':suricata:': - soc_timestamp - source.ip @@ -1270,6 +1270,7 @@ soc: - repo: https://github.com/Security-Onion-Solutions/securityonion-resources license: Elastic-2.0 folder: sigma/stable + community: true sigmaRulePackages: - core - emerging_threats_addon @@ -1327,6 +1328,7 @@ soc: rulesRepos: - repo: https://github.com/Security-Onion-Solutions/securityonion-yara license: DRL + community: true yaraRulesFolder: /opt/sensoroni/yara/rules stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state suricataengine: @@ -1917,7 +1919,7 @@ soc: query: 'event.dataset:soc.detections | groupby soc.detection_type soc.error_type | groupby soc.error_analysis | groupby soc.rule.name | groupby soc.error_message' - + job: alerts: advanced: false @@ -1955,7 +1957,7 @@ soc: - event_data.destination.host - event_data.destination.port - event_data.process.executable - - event_data.process.pid + - event_data.process.pid ':sigma:': - soc_timestamp - rule.name @@ -1967,7 +1969,7 @@ soc: - event_data.destination.host - event_data.destination.port - 
event_data.process.executable - - event_data.process.pid + - event_data.process.pid ':strelka:': - soc_timestamp - file.name From 4d6124f982ccf63396a2020558db4601a642d6aa Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 30 Apr 2024 10:18:34 -0400 Subject: [PATCH 458/777] FIX: Elasticsearch min_age regex #12885 --- salt/elasticsearch/soc_elasticsearch.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 210697bba..42262a178 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -121,7 +121,7 @@ elasticsearch: cold: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. - regex: ^\[0-9\]{1,5}d$ + regex: ^[0-9]{1,5}d$ forcedType: string global: True helpLink: elasticsearch.html @@ -134,7 +134,7 @@ elasticsearch: warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. - regex: ^\[0-9\]{1,5}d$ + regex: ^[0-9]{1,5}d$ forcedType: string global: True actions: @@ -147,7 +147,7 @@ elasticsearch: delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. - regex: ^\[0-9\]{1,5}d$ + regex: ^[0-9]{1,5}d$ forcedType: string global: True helpLink: elasticsearch.html @@ -276,7 +276,7 @@ elasticsearch: warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. 
- regex: ^\[0-9\]{1,5}d$ + regex: ^[0-9]{1,5}d$ forcedType: string global: True advanced: True @@ -303,7 +303,7 @@ elasticsearch: cold: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. - regex: ^\[0-9\]{1,5}d$ + regex: ^[0-9]{1,5}d$ forcedType: string global: True advanced: True @@ -319,7 +319,7 @@ elasticsearch: delete: min_age: description: Minimum age of index. This determines when the index should be deleted. - regex: ^\[0-9\]{1,5}d$ + regex: ^[0-9]{1,5}d$ forcedType: string global: True advanced: True From 9c83a52c6d22db4a3508e6fafe19e0e61b635da7 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:01:31 -0400 Subject: [PATCH 459/777] Add Kafka output to elastic-fleet setup. Includes separating topics by event.module with fallback to default-logs if no event.module is specified or doesn't match processors Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../tools/sbin_jinja/so-elastic-fleet-setup | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index 361469b26..e01360687 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -77,6 +77,23 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl printf "\n\n" {%- endif %} +printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n" +{% if grains.role not in ['so-import', 'so-eval'] %} +KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) +KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) +{# KAFKACA=$(openssl x509 -in $INTCA) #} +KAFKACA=$(openssl x509 -in 
/etc/pki/tls/certs/intca.crt) +KAFKA_OUTPUT_VERSION="2.6.0" +JSON_STRING=$( jq -n \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACA "$KAFKACA" \ + --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ + '{ "name": "grid_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + ) +curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +{% endif %} + # Add Manager Hostname & URL Base to Fleet Host URLs printf "\nAdd SO-Manager Fleet URL\n" if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then From 
fcc4050f86da9c09a76d74bf40981a3f86abc632 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:59:53 -0400 Subject: [PATCH 460/777] Add id to grid-kafka fleet output policy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index e01360687..14ab58b45 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -89,7 +89,7 @@ JSON_STRING=$( jq -n \ --arg KAFKAKEY "$KAFKAKEY" \ --arg KAFKACA "$KAFKACA" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ - '{ "name": "grid_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", 
"condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' ) curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" {% endif %} From 84db82852c7825f5c0d145966e19b9496963f158 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 30 Apr 2024 15:14:56 -0400 
Subject: [PATCH 461/777] annotation updates for custom settings --- salt/firewall/soc_firewall.yaml | 7 +++ salt/logstash/soc_logstash.yaml | 3 + salt/suricata/soc_suricata.yaml | 102 +++++++++----------------------- salt/zeek/soc_zeek.yaml | 4 +- 4 files changed, 42 insertions(+), 74 deletions(-) diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 522684e07..69093dee7 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -7,6 +7,7 @@ firewall: multiline: True regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True anywhere: &hostgroupsettingsadv description: List of IP or CIDR blocks to allow access to this hostgroup. forcedType: "[]string" @@ -15,6 +16,7 @@ firewall: advanced: True regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True beats_endpoint: *hostgroupsettings beats_endpoint_ssl: *hostgroupsettings dockernet: &ROhostgroupsettingsadv @@ -53,6 +55,7 @@ firewall: multiline: True regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True customhostgroup1: *customhostgroupsettings customhostgroup2: *customhostgroupsettings customhostgroup3: *customhostgroupsettings @@ -70,12 +73,14 @@ firewall: helpLink: firewall.html advanced: True multiline: True + duplicates: True udp: &udpsettings description: List of UDP ports for this port group. 
forcedType: "[]string" helpLink: firewall.html advanced: True multiline: True + duplicates: True agrules: tcp: *tcpsettings udp: *udpsettings @@ -187,6 +192,7 @@ firewall: multiline: True forcedType: "[]string" helpLink: firewall.html + duplicates: True sensor: portgroups: *portgroupsdocker searchnode: @@ -240,6 +246,7 @@ firewall: multiline: True forcedType: "[]string" helpLink: firewall.html + duplicates: True dockernet: portgroups: *portgroupshost localhost: diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index 3172ff7c5..cc81d3103 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -10,6 +10,7 @@ logstash: helpLink: logstash.html multiline: True forcedType: "[]string" + duplicates: True receiver: *assigned_pipelines heavynode: *assigned_pipelines searchnode: *assigned_pipelines @@ -23,6 +24,7 @@ logstash: helpLink: logstash.html multiline: True forcedType: "[]string" + duplicates: True fleet: *defined_pipelines manager: *defined_pipelines search: *defined_pipelines @@ -38,6 +40,7 @@ logstash: multiline: True forcedType: string helpLink: logstash.html + duplicates: True custom002: *pipeline_config custom003: *pipeline_config custom004: *pipeline_config diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index a1847167c..78c28f9e4 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -148,84 +148,40 @@ suricata: helpLink: suricata.html vars: address-groups: - HOME_NET: - description: List of hosts or networks. + HOME_NET: &suriaddressgroup + description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. 
regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. helpLink: suricata.html - EXTERNAL_NET: - description: List of hosts or networks. - helpLink: suricata.html - HTTP_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - SMTP_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - SQL_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - DNS_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - TELNET_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - AIM_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - DC_SERVERS: - description: List of hosts or networks. - helpLink: suricata.html - DNP3_SERVER: - description: List of hosts or networks. 
- helpLink: suricata.html - DNP3_CLIENT: - description: List of hosts or networks. - helpLink: suricata.html - MODBUS_CLIENT: - description: List of hosts or networks. - helpLink: suricata.html - MODBUS_SERVER: - description: List of hosts or networks. - helpLink: suricata.html - ENIP_CLIENT: - description: List of hosts or networks. - helpLink: suricata.html - ENIP_SERVER: - description: List of hosts or networks. - helpLink: suricata.html + duplicates: True + EXTERNAL_NET: *suriaddressgroup + HTTP_SERVERS: *suriaddressgroup + SMTP_SERVERS: *suriaddressgroup + SQL_SERVERS: *suriaddressgroup + DNS_SERVERS: *suriaddressgroup + TELNET_SERVERS: *suriaddressgroup + AIM_SERVERS: *suriaddressgroup + DC_SERVERS: *suriaddressgroup + DNP3_SERVER: *suriaddressgroup + DNP3_CLIENT: *suriaddressgroup + MODBUS_CLIENT: *suriaddressgroup + MODBUS_SERVER: *suriaddressgroup + ENIP_CLIENT: *suriaddressgroup + ENIP_SERVER: *suriaddressgroup port-groups: - HTTP_PORTS: - description: List of ports to look for HTTP traffic on. - helpLink: suricata.html - SHELLCODE_PORTS: - description: List of ports to look for SHELLCODE traffic on. - helpLink: suricata.html - ORACLE_PORTS: - description: List of ports to look for ORACLE traffic on. - helpLink: suricata.html - SSH_PORTS: - description: List of ports to look for SSH traffic on. - helpLink: suricata.html - DNP3_PORTS: - description: List of ports to look for DNP3 traffic on. - helpLink: suricata.html - MODBUS_PORTS: - description: List of ports to look for MODBUS traffic on. - helpLink: suricata.html - FILE_DATA_PORTS: - description: List of ports to look for FILE_DATA traffic on. - helpLink: suricata.html - FTP_PORTS: - description: List of ports to look for FTP traffic on. - helpLink: suricata.html - VXLAN_PORTS: - description: List of ports to look for VXLAN traffic on. - helpLink: suricata.html - TEREDO_PORTS: - description: List of ports to look for TEREDO traffic on. 
+ HTTP_PORTS: &suriportgroup + description: Assign a list of network port numbers to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. helpLink: suricata.html + duplicates: True + SHELLCODE_PORTS: *suriportgroup + ORACLE_PORTS: *suriportgroup + SSH_PORTS: *suriportgroup + DNP3_PORTS: *suriportgroup + MODBUS_PORTS: *suriportgroup + FILE_DATA_PORTS: *suriportgroup + FTP_PORTS: *suriportgroup + VXLAN_PORTS: *suriportgroup + TEREDO_PORTS: *suriportgroup outputs: eve-log: types: diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index bd5d88116..021bf29ea 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -19,13 +19,14 @@ zeek: helpLink: zeek.html networks: HOME_NET: - description: List of IP or CIDR blocks to define as the HOME_NET. + description: List of IP or CIDR blocks to define as the for this Zeek network alias. forcedType: "[]string" advanced: False helpLink: zeek.html multiline: True regex: 
^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. + duplicates: True node: lb_procs: description: Contains the number of CPU cores or workers used by Zeek. This setting should only be applied to individual nodes and will be ignored if CPU affinity is enabled. @@ -60,6 +61,7 @@ zeek: file: True global: True advanced: True + duplicates: True file_extraction: description: Contains a list of file or MIME types Zeek will extract from the network streams. 
Values must adhere to the following format - {"MIME_TYPE":"FILE_EXTENSION"} helpLink: zeek.html From bb49944b9613402cffdfa9ab0ebe4a88a5f6dc5d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Apr 2024 16:47:40 -0400 Subject: [PATCH 462/777] Setup elastic fleet rollover from logstash -> kafka output policy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../so-elastic-fleet-outputs-update | 146 ++++++++++++------ .../tools/sbin_jinja/so-elastic-fleet-setup | 3 +- 2 files changed, 96 insertions(+), 53 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index eb5ccc1ed..4d2867fc7 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -21,64 +21,104 @@ function update_logstash_outputs() { # Update Logstash Outputs curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq } +function update_kafka_outputs() { + # Make sure SSL configuration is included in policy updates for Kafka output. 
SSL is configured in so-elastic-fleet-setup + SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl') -# Get current list of Logstash Outputs -RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash') + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') + # Update Kafka outputs + curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq +} -# Check to make sure that the server responded with good data - else, bail from script -CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") -if [ "$CHECKSUM" != "so-manager_logstash" ]; then - printf "Failed to query for current Logstash Outputs..." - exit 1 -fi +{% if GLOBALS.pipeline == "KAFKA" %} + # Get current list of Kafka Outputs + RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka') -# Get the current list of Logstash outputs & hash them -CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") -CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + # Check to make sure that the server responded with good data - else, bail from script + CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") + if [ "$CHECKSUM" != "so-manager_kafka" ]; then + printf "Failed to query for current Kafka Outputs..." 
+ exit 1 + fi -declare -a NEW_LIST=() + # Get the current list of kafka outputs & hash them + CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") + CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + + declare -a NEW_LIST=() + + # Query for the current Grid Nodes that are running kafka + KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local') + + # Query for Kafka nodes with Broker role and add hostname to list + while IFS= read -r line; do + NEW_LIST+=("$line") + done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES) + + {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} +{% else %} + # Get current list of Logstash Outputs + RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash') + + # Check to make sure that the server responded with good data - else, bail from script + CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") + if [ "$CHECKSUM" != "so-manager_logstash" ]; then + printf "Failed to query for current Logstash Outputs..." 
+ exit 1 + fi + + # Get the current list of Logstash outputs & hash them + CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") + CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + + declare -a NEW_LIST=() + + {# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #} + {% if ELASTICFLEETMERGED.enable_manager_output %} + # Create array & add initial elements + if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then + NEW_LIST+=("{{ GLOBALS.url_base }}:5055") + else + NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055") + fi + {% endif %} + + # Query for FQDN entries & add them to the list + {% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %} + CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}') + readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST") + for CUSTOMNAME in "${CUSTOMFQDN[@]}" + do + NEW_LIST+=("$CUSTOMNAME:5055") + done + {% endif %} + + # Query for the current Grid Nodes that are running Logstash + LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') + + # Query for Receiver Nodes & add them to the list + if grep -q "receiver" <<< $LOGSTASHNODES; then + readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES) + for NODE in "${RECEIVERNODES[@]}" + do + NEW_LIST+=("$NODE:5055") + done + fi + + # Query for Fleet Nodes & add them to the list + if grep -q "fleet" <<< $LOGSTASHNODES; then + readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES) + for NODE in "${FLEETNODES[@]}" + do + NEW_LIST+=("$NODE:5055") + done + fi -{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #} -{% if ELASTICFLEETMERGED.enable_manager_output %} -# Create array & add initial elements -if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then - NEW_LIST+=("{{ GLOBALS.url_base }}:5055") -else - NEW_LIST+=("{{ 
GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055") -fi {% endif %} -# Query for FQDN entries & add them to the list -{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %} -CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}') -readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST") -for CUSTOMNAME in "${CUSTOMFQDN[@]}" -do - NEW_LIST+=("$CUSTOMNAME:5055") -done -{% endif %} - -# Query for the current Grid Nodes that are running Logstash -LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') - -# Query for Receiver Nodes & add them to the list -if grep -q "receiver" <<< $LOGSTASHNODES; then - readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES) - for NODE in "${RECEIVERNODES[@]}" - do - NEW_LIST+=("$NODE:5055") - done -fi - -# Query for Fleet Nodes & add them to the list -if grep -q "fleet" <<< $LOGSTASHNODES; then - readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES) - for NODE in "${FLEETNODES[@]}" - do - NEW_LIST+=("$NODE:5055") - done -fi - # Sort & hash the new list of Logstash Outputs NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') @@ -91,5 +131,9 @@ if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then else printf "\nHashes don't match - update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" +{% if GLOBALS.pipeline == "KAFKA" %} + update_kafka_outputs +{% else %} update_logstash_outputs +{% endif %} fi diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index 14ab58b45..acaec360b 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -81,8 +81,7 @@ printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n" {% if 
grains.role not in ['so-import', 'so-eval'] %} KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) -{# KAFKACA=$(openssl x509 -in $INTCA) #} -KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) +KAFKACA=$(openssl x509 -in $INTCA) KAFKA_OUTPUT_VERSION="2.6.0" JSON_STRING=$( jq -n \ --arg KAFKACRT "$KAFKACRT" \ From 9a4a85e3aed340bdcab22ce54b6a4cd6805f3788 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 1 May 2024 07:54:38 -0400 Subject: [PATCH 463/777] FEATURE: Lower EVAL memory requirement to 8GB RAM #12896 --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 60908e0d4..7afc0a883 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -559,7 +559,7 @@ check_requirements() { local num_nics=${#nic_list[@]} if [[ $is_eval ]]; then - req_mem=12 + req_mem=8 req_cores=4 req_nics=2 elif [[ $is_standalone ]]; then From cef9bb148725e980933325ee86f294512438b487 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 09:16:13 -0400 Subject: [PATCH 464/777] Dynamically create Kafka topics based on event.module from elastic agent logs eg. zeek-topic. 
Depends on Kafka brokers having auto.create.topics.enable set to true Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index acaec360b..aacc3ebc8 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -88,7 +88,7 @@ JSON_STRING=$( jq -n \ --arg KAFKAKEY "$KAFKAKEY" \ --arg KAFKACA "$KAFKACA" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ - '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": 
"default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-topic","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-topic"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' ) curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" {% endif %} From eb1249618b995a83c02b2b56692703019fe3532f Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 09:27:01 -0400 Subject: [PATCH 465/777] Update soup for Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/so-kafka-clusterid | 2 +- salt/manager/tools/sbin/soup | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index eb0701b8b..c4e449448 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -26,4 +26,4 @@ fi if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fit \ No newline at end of file +fi \ No newline at end of file diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a6f9032a5..abde1ed0a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -448,13 +448,13 @@ post_to_2.4.70() { touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls - if ! grep -q "^ certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) - echo ' certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls fi POSTVERSION=2.4.70 From 87c6d0a820f97eb9a05b5469cbce1a2c0f2c9ca7 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 1 May 2024 09:29:36 -0400 Subject: [PATCH 466/777] zeek networks will only ever have one HOME_NETWORKS setting --- salt/zeek/soc_zeek.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index 021bf29ea..ea2c948ba 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -26,7 +26,6 @@ zeek: multiline: True regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. 
- duplicates: True node: lb_procs: description: Contains the number of CPU cores or workers used by Zeek. This setting should only be applied to individual nodes and will be ignored if CPU affinity is enabled. From d0e140cf7b2bd92bbcc6fb6bc2bab3f69ccdbded Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 1 May 2024 09:30:52 -0400 Subject: [PATCH 467/777] zeek networks will only ever have one HOME_NETWORKS setting --- salt/zeek/soc_zeek.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index ea2c948ba..9a41f3daf 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -19,7 +19,7 @@ zeek: helpLink: zeek.html networks: HOME_NET: - description: List of IP or CIDR blocks to define as the for this Zeek network alias. + description: List of IP or CIDR blocks to define as the HOME_NET forcedType: "[]string" advanced: False helpLink: zeek.html From 66563a4da0c6b85f767b2ba9184df2ef4952d206 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 1 May 2024 09:31:11 -0400 Subject: [PATCH 468/777] zeek networks will only ever have one HOME_NETWORKS setting --- salt/zeek/soc_zeek.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index 9a41f3daf..1594eed58 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -19,7 +19,7 @@ zeek: helpLink: zeek.html networks: HOME_NET: - description: List of IP or CIDR blocks to define as the HOME_NET + description: List of IP or CIDR blocks to define as the HOME_NET. 
forcedType: "[]string" advanced: False helpLink: zeek.html From 63f3e23e2b00af48a3e3789657ed1c3feee488be Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 09:54:19 -0400 Subject: [PATCH 469/777] soup typo Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index abde1ed0a..16f4d9076 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -451,6 +451,7 @@ post_to_2.4.70() { if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) From 6b60e85a337f11ac8559740ff90bae3a43d0507a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 10:15:26 -0400 Subject: [PATCH 470/777] Make kafka configuration changes prior to 2.4.70 upgrade Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 41 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 16f4d9076..6e4820717 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,26 +438,7 @@ post_to_2.4.60() { } post_to_2.4.70() { - # Global pipeline changes to REDIS or KAFKA - echo "Removing global.pipeline pillar configuration" - sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls - - # Kafka configuration - mkdir -p /opt/so/saltstack/local/pillar/kafka - touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls - echo 'kafka: ' > 
/opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - - if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi - - if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi - + echo "Nothing to apply" POSTVERSION=2.4.70 } @@ -603,6 +584,26 @@ up_to_2.4.60() { up_to_2.4.70() { playbook_migration toggle_telemetry + + # Kafka configuration changes + + # Global pipeline changes to REDIS or KAFKA + echo "Removing global.pipeline pillar configuration" + sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls + # Kafka pillars + mkdir -p /opt/so/saltstack/local/pillar/kafka + touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls + echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi + if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + kafkapass=$(get_random_value) + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi + INSTALLEDVERSION=2.4.70 } From 84abfa688181c0eb878e26c7e87cdce79861825c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 10:45:05 -0400 Subject: [PATCH 471/777] Remove check for existing value since Kafka pillar is made empty on upgrade Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 6e4820717..4c328b373 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -595,14 +595,10 @@ up_to_2.4.70() { touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi - if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi + kafka_cluster_id=$(get_random_value 22) + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + kafkapass=$(get_random_value) + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls INSTALLEDVERSION=2.4.70 } From de0af58cf8328ccc1c58886a9b0166b3cf67286d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 10:45:46 -0400 Subject: [PATCH 472/777] Write out Kafka pillar path Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 4c328b373..436765d30 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -596,9 +596,9 @@ up_to_2.4.70() { touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' kafka_pass: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls INSTALLEDVERSION=2.4.70 } From 6294f751ee3a0b2cad295a6b9b2e0294a4460dfa Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 1 May 2024 10:59:41 -0400 Subject: [PATCH 473/777] Cold min_age to 60d --- salt/elasticsearch/defaults.yaml | 452 +++++++++++++++---------------- 1 file changed, 226 insertions(+), 226 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 
53340340f..156483b03 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -152,7 +152,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -316,7 +316,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -432,7 +432,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -544,7 +544,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -649,7 +649,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -762,7 +762,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -870,7 +870,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -933,7 +933,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -977,7 +977,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1021,7 +1021,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1065,7 +1065,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1109,7 +1109,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1153,7 +1153,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1197,7 +1197,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1241,7 +1241,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -1285,7 +1285,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1329,7 +1329,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1373,7 +1373,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1417,7 +1417,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1461,7 +1461,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1505,7 +1505,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1549,7 +1549,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1593,7 +1593,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1637,7 +1637,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1681,7 +1681,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1725,7 +1725,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1769,7 +1769,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1813,7 +1813,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1857,7 +1857,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1901,7 +1901,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ 
-1945,7 +1945,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -1989,7 +1989,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2033,7 +2033,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2077,7 +2077,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2121,7 +2121,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2165,7 +2165,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2209,7 +2209,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2253,7 +2253,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2297,7 +2297,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2341,7 +2341,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2385,7 +2385,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2429,7 +2429,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2473,7 +2473,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2517,7 +2517,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2561,7 +2561,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2605,7 +2605,7 @@ elasticsearch: actions: 
set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2649,7 +2649,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2693,7 +2693,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2737,7 +2737,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2781,7 +2781,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2825,7 +2825,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2869,7 +2869,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2913,7 +2913,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -2957,7 +2957,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3001,7 +3001,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3045,7 +3045,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3089,7 +3089,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3133,7 +3133,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3177,7 +3177,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3221,7 +3221,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3265,7 +3265,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -3309,7 +3309,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3353,7 +3353,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3397,7 +3397,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3441,7 +3441,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3485,7 +3485,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3529,7 +3529,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3573,7 +3573,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3635,7 +3635,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3696,7 +3696,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3757,7 +3757,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3815,7 +3815,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3871,7 +3871,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3927,7 +3927,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -3980,7 +3980,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4038,7 +4038,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ 
-4094,7 +4094,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4150,7 +4150,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4211,7 +4211,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4267,7 +4267,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4323,7 +4323,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4379,7 +4379,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4435,7 +4435,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4491,7 +4491,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4547,7 +4547,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4603,7 +4603,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4659,7 +4659,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4715,7 +4715,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4759,7 +4759,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4803,7 +4803,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4847,7 +4847,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4891,7 +4891,7 @@ elasticsearch: actions: 
set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4935,7 +4935,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -4979,7 +4979,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5023,7 +5023,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5067,7 +5067,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5111,7 +5111,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5155,7 +5155,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5199,7 +5199,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5243,7 +5243,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5287,7 +5287,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5331,7 +5331,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5375,7 +5375,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5419,7 +5419,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5463,7 +5463,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5507,7 +5507,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5551,7 +5551,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -5595,7 +5595,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5639,7 +5639,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5683,7 +5683,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5727,7 +5727,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5771,7 +5771,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5815,7 +5815,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5859,7 +5859,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5903,7 +5903,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5947,7 +5947,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -5991,7 +5991,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6035,7 +6035,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6079,7 +6079,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6123,7 +6123,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6167,7 +6167,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6211,7 +6211,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ 
-6255,7 +6255,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6299,7 +6299,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6343,7 +6343,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6387,7 +6387,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6431,7 +6431,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6475,7 +6475,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6519,7 +6519,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6563,7 +6563,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6607,7 +6607,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6651,7 +6651,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6695,7 +6695,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6739,7 +6739,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6783,7 +6783,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6827,7 +6827,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6871,7 +6871,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6915,7 +6915,7 @@ elasticsearch: actions: 
set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -6959,7 +6959,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7003,7 +7003,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7047,7 +7047,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7091,7 +7091,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7135,7 +7135,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7179,7 +7179,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7223,7 +7223,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7267,7 +7267,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7311,7 +7311,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7355,7 +7355,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7399,7 +7399,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7443,7 +7443,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7487,7 +7487,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7531,7 +7531,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7575,7 +7575,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -7619,7 +7619,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7663,7 +7663,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7741,7 +7741,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7785,7 +7785,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7829,7 +7829,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7873,7 +7873,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7917,7 +7917,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -7961,7 +7961,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8005,7 +8005,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8049,7 +8049,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8093,7 +8093,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8137,7 +8137,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8181,7 +8181,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8225,7 +8225,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8269,7 +8269,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ 
-8313,7 +8313,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8357,7 +8357,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8401,7 +8401,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8445,7 +8445,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8489,7 +8489,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8533,7 +8533,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8577,7 +8577,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8621,7 +8621,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8666,7 +8666,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8711,7 +8711,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8756,7 +8756,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8801,7 +8801,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8846,7 +8846,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8890,7 +8890,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8934,7 +8934,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -8978,7 +8978,7 @@ elasticsearch: actions: 
set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9022,7 +9022,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9066,7 +9066,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9110,7 +9110,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9154,7 +9154,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9198,7 +9198,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9242,7 +9242,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9286,7 +9286,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9330,7 +9330,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9374,7 +9374,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9418,7 +9418,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9462,7 +9462,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9506,7 +9506,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9550,7 +9550,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9594,7 +9594,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9638,7 +9638,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + 
min_age: 60d delete: actions: delete: {} @@ -9682,7 +9682,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9726,7 +9726,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9770,7 +9770,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9814,7 +9814,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9858,7 +9858,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9902,7 +9902,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9946,7 +9946,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -9990,7 +9990,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10034,7 +10034,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10078,7 +10078,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10122,7 +10122,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10166,7 +10166,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10210,7 +10210,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10254,7 +10254,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10298,7 +10298,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: 
delete: {} @@ -10342,7 +10342,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10386,7 +10386,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10430,7 +10430,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10474,7 +10474,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10518,7 +10518,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10565,7 +10565,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10675,7 +10675,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10785,7 +10785,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -10897,7 +10897,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -11008,7 +11008,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -11120,7 +11120,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} @@ -11233,7 +11233,7 @@ elasticsearch: actions: set_priority: priority: 0 - min_age: 30d + min_age: 60d delete: actions: delete: {} From fe2edeb2fb827cad279149974687dd2b36f0bc5d Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 1 May 2024 11:01:59 -0400 Subject: [PATCH 474/777] 30d to 60d --- salt/elasticsearch/soc_elasticsearch.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 42262a178..cc92493fb 100644 --- 
a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -120,7 +120,7 @@ elasticsearch: helpLink: elasticsearch.html cold: min_age: - description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. regex: ^[0-9]{1,5}d$ forcedType: string global: True @@ -302,7 +302,7 @@ elasticsearch: helpLink: elasticsearch.html cold: min_age: - description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. regex: ^[0-9]{1,5}d$ forcedType: string global: True From c71af9127bdcb617a13dbc2a0d2718e6d3b24fbe Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 1 May 2024 11:47:38 -0400 Subject: [PATCH 475/777] mark detections settings as read-only via the UI --- salt/idstools/soc_idstools.yaml | 12 ++++++++---- salt/suricata/soc_suricata.yaml | 3 ++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index 0a2bf0cbf..f4389f8ba 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -19,33 +19,37 @@ idstools: helpLink: rules.html sids: disabled: - description: Contains the list of NIDS rules manually disabled across the grid. 
To disable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To disable multiple rules, you can use regular expressions. + description: Contains the list of NIDS rules (or regex patterns) disabled across the grid. This setting is readonly; Use the Detections screen to disable rules. global: True multiline: True forcedType: "[]string" regex: \d*|re:.* helpLink: managing-alerts.html + readonlyUi: True enabled: - description: Contains the list of NIDS rules manually enabled across the grid. To enable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To enable multiple rules, you can use regular expressions. + description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; Use the Detections screen to enable rules. global: True multiline: True forcedType: "[]string" regex: \d*|re:.* helpLink: managing-alerts.html + readonlyUi: True modify: - description: Contains the list of NIDS rules that were modified from their default values. Entries must adhere to the following format - SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM" + description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; Use the Detections screen to modify rules. global: True multiline: True forcedType: "[]string" helpLink: managing-alerts.html + readonlyUi: True rules: local__rules: - description: Contains the list of custom NIDS rules applied to the grid. To add custom NIDS rules to the grid, enter one rule per line in the Current Grid Value box. + description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; Use the Detections screen to adjust rules. file: True global: True advanced: True title: Local Rules helpLink: local-rules.html + readonlyUi: True filters__rules: description: If you are using Suricata for metadata, then you can set custom filters for that metadata here. 
file: True diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 78c28f9e4..f7c3b2920 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -4,13 +4,14 @@ suricata: helpLink: suricata.html thresholding: sids__yaml: - description: Threshold SIDS List + description: Threshold SIDS List. This setting is readonly; Use the Detections screen to modify rules. syntax: yaml file: True global: True multiline: True title: SIDS helpLink: suricata.html + readonlyUi: True classification: classification__config: description: Classifications config file. From 8cd75902f229ecb0bc2dd63a98cf78ceb28bd68f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 May 2024 11:47:51 -0400 Subject: [PATCH 476/777] Update config.sls --- salt/soc/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index af34f5e7c..18b292b27 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -82,7 +82,7 @@ socmotd: crondetectionsruntime: cron.present: - - name: /usr/local/bin/so-detections-runtime-status cron + - name: /usr/local/bin/so-detections-runtime-status cron > /opt/so/log/soc/detection-sync.log 2>&1 - identifier: detections-runtime-status - user: socore - minute: '*/10' From f7223f132ace0ea0b5d5ca22963a9a86543a1faf Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 May 2024 12:00:39 -0400 Subject: [PATCH 477/777] Update config.sls --- salt/soc/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 18b292b27..545118dd9 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -84,7 +84,7 @@ crondetectionsruntime: cron.present: - name: /usr/local/bin/so-detections-runtime-status cron > /opt/so/log/soc/detection-sync.log 2>&1 - identifier: detections-runtime-status - - user: socore + - user: root - minute: '*/10' - hour: '*' - daymonth: '*' From 7122709bbf9dd99c70a555f3e0bae93ba6d252ba Mon 
Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 1 May 2024 12:25:34 -0400 Subject: [PATCH 478/777] set Sigma rules based on role if defined and default if not --- salt/soc/defaults.yaml | 13 ++++++++++--- salt/soc/merged.map.jinja | 8 +++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 9be17bcca..b66ae3bbe 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1257,9 +1257,16 @@ soc: allowRegex: '' autoUpdateEnabled: true autoEnabledSigmaRules: - - core+critical - - securityonion-resources+critical - - securityonion-resources+high + default: + - core+critical + - securityonion-resources+critical + - securityonion-resources+high + so-eval: + - securityonion-resources+critical + - securityonion-resources+high + so-import: + - securityonion-resources+critical + - securityonion-resources+high communityRulesImportFrequencySeconds: 28800 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index ae68dc01f..222566dba 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -30,9 +30,11 @@ {# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #} {% do SOCMERGED.config.server.modules.pop('cases') %} -{# do not automatically enable Sigma rules if install is Eval or Import #} -{% if grains['role'] in ['so-eval', 'so-import'] %} - {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': []}) %} +{# set Sigma rules based on role if defined and default if not #} +{% if GLOBALS.role in SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules[GLOBALS.role]}) %} +{% else %} +{% do 
SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules.default}) %} {% endif %} {# remove these modules if detections is disabled #} From 252d9a53203fa7be1a12edb21c6ab1d81dce4e72 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 1 May 2024 12:51:04 -0400 Subject: [PATCH 479/777] make rule settings advanced --- salt/idstools/soc_idstools.yaml | 3 +++ salt/suricata/soc_suricata.yaml | 1 + 2 files changed, 4 insertions(+) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index f4389f8ba..ce8b56569 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -26,6 +26,7 @@ idstools: regex: \d*|re:.* helpLink: managing-alerts.html readonlyUi: True + advanced: true enabled: description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; Use the Detections screen to enable rules. global: True @@ -34,6 +35,7 @@ idstools: regex: \d*|re:.* helpLink: managing-alerts.html readonlyUi: True + advanced: true modify: description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; Use the Detections screen to modify rules. global: True @@ -41,6 +43,7 @@ idstools: forcedType: "[]string" helpLink: managing-alerts.html readonlyUi: True + advanced: true rules: local__rules: description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; Use the Detections screen to adjust rules. diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index f7c3b2920..75ad1e476 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -12,6 +12,7 @@ suricata: title: SIDS helpLink: suricata.html readonlyUi: True + advanced: true classification: classification__config: description: Classifications config file. 
From 10c8e4203c273b90dcfa2d76f48d3595520a6b18 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 May 2024 12:54:21 -0400 Subject: [PATCH 480/777] Update config.sls --- salt/soc/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 545118dd9..b440b07fc 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -82,7 +82,7 @@ socmotd: crondetectionsruntime: cron.present: - - name: /usr/local/bin/so-detections-runtime-status cron > /opt/so/log/soc/detection-sync.log 2>&1 + - name: /usr/local/bin/so-detections-runtime-status cron - identifier: detections-runtime-status - user: root - minute: '*/10' From 47ba4c0f57b7f8a0ea756cb680b883fbc8d05317 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 1 May 2024 12:55:29 -0400 Subject: [PATCH 481/777] add new annotation for soc autoEnabledSigmaRules --- salt/soc/soc_soc.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 2001fb0c1..4b88a5f84 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -89,10 +89,13 @@ soc: advanced: True helpLink: sigma.html autoEnabledSigmaRules: - description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical' - global: True - advanced: True - helpLink: sigma.html + default: &autoEnabledSigmaRules + description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical. These will be applied based on role if defined and default if not.' + global: True + advanced: True + helpLink: sigma.html + so-eval: *autoEnabledSigmaRules + so-import: *autoEnabledSigmaRules denyRegex: description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.' 
global: True From 3efdb4e5328713ab1633bf85de200a46f8d66a03 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 13:01:29 -0400 Subject: [PATCH 482/777] Reconfigure logstash Kafka input - TODO: Configure what topics are pulled to searchnodes via the SOC UI Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/top.sls | 1 + salt/logstash/defaults.yaml | 1 + .../config/so/0800_input_kafka.conf.jinja | 19 +++++++++---------- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index fbb1604da..61b812cc8 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -226,6 +226,7 @@ base: - minions.adv_{{ grains.id }} - stig.soc_stig - soc.license + - kafka.nodes '*_receiver': - logstash.nodes diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index 348acb622..d82cba1ff 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -37,6 +37,7 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - so/9900_output_endgame.conf.jinja + - so/0800_input_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 85e6729e2..087ed7755 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,18 +1,17 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %} -{% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %} -{% set broker_ips = [] %} -{% for node, node_data in kafka_brokers.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% set kafka_brokers = salt['pillar.get']('kafka:nodes', {}) %} +{% set brokers = [] %} + +{% for key, values in kafka_brokers.items() %} +{% if 'broker' in values['role'] %} +{% do 
brokers.append(key ~ ':9092') %} +{% endif %} {% endfor %} -{% for node, node_data in kafka_on_mngr.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% set bootstrap_servers = "','".join(broker_ips) %} +{% set bootstrap_servers = ','.join(brokers) %} input { kafka { codec => json - topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs'] + topics => ['default-topic', 'kratos-topic', 'soc-topic', 'strelka-topic', 'suricata-topic', 'zeek-topic', 'rita-topic', 'opencanary-topic', 'syslog-topic'] group_id => 'searchnodes' client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' From e164d15ec64c6238312f2fdf1a9c25f70238c22e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 13:02:47 -0400 Subject: [PATCH 483/777] Generate different Kafka certs for different SO nodetypes Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ssl/init.sls | 221 +++++++++++++++++++++++++++++----------------- 1 file changed, 142 insertions(+), 79 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 854628949..72fc6c9a4 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -662,31 +662,27 @@ elastickeyperms: - mode: 640 - group: 930 -{%- endif %} - -{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %} - -kafka_key: +kafka_logstash_key: x509.private_key_managed: - - name: /etc/pki/kafka.key + - name: /etc/pki/kafka-logstash.key - keysize: 4096 - backup: True - new: True - {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} - prereq: - - x509: /etc/pki/kafka.crt + - x509: /etc/pki/kafka-logstash.crt {%- endif %} - retry: attempts: 5 interval: 30 -kafka_crt: +kafka_logstash_crt: x509.certificate_managed: - - name: /etc/pki/kafka.crt + - name: /etc/pki/kafka-logstash.crt - ca_server: {{ ca_server }} - subjectAltName: DNS:{{ 
GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - signing_policy: kafka - - private_key: /etc/pki/kafka.key + - private_key: /etc/pki/kafka-logstash.key - CN: {{ GLOBALS.hostname }} - days_remaining: 0 - days_valid: 820 @@ -696,9 +692,37 @@ kafka_crt: attempts: 5 interval: 30 cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit" + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" - onchanges: - - x509: /etc/pki/kafka.key + - x509: /etc/pki/kafka-logstash.key + +kafka_logstash_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 960 + - group: 931 + +{%- endif %} + +{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %} elasticfleet_kafka_key: x509.private_key_managed: @@ -734,41 +758,30 @@ elasticfleet_kafka_crt: - onchanges: - x509: elasticfleet_kafka_key -kafka_logstash_key: - x509.private_key_managed: - - name: /etc/pki/kafka-logstash.key - - keysize: 4096 - - backup: True - - new: True - {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} - - prereq: - - x509: /etc/pki/kafka-logstash.crt - {%- endif %} - - retry: - attempts: 5 - interval: 30 +elasticfleet_kafka_cert_perms: + file.managed: + - replace: False + - name: /etc/pki/elasticfleet-kafka.crt + - mode: 640 + - user: 960 + - group: 939 -kafka_logstash_crt: - x509.certificate_managed: - - name: /etc/pki/kafka-logstash.crt - - ca_server: {{ ca_server }} - - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ 
GLOBALS.node_ip }} - - signing_policy: kafka - - private_key: /etc/pki/kafka-logstash.key - - CN: {{ GLOBALS.hostname }} - - days_remaining: 0 - - days_valid: 820 - - backup: True - - timeout: 30 - - retry: - attempts: 5 - interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" - - onchanges: - - x509: /etc/pki/kafka-logstash.key +elasticfleet_kafka_key_perms: + file.managed: + - replace: False + - name: /etc/pki/elasticfleet-kafka.key + - mode: 640 + - user: 960 + - group: 939 + +elasticfleet_kafka_pkcs8_perms: + file.managed: + - replace: False + - name: /etc/pki/elasticfleet-kafka.p8 + - mode: 640 + - user: 960 + - group: 939 -{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %} kafka_client_key: x509.private_key_managed: - name: /etc/pki/kafka-client.key @@ -814,8 +827,44 @@ kafka_client_crt_perms: - mode: 640 - user: 960 - group: 939 -{% endif %} +{% endif %} + +{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} + +kafka_key: + x509.private_key_managed: + - name: /etc/pki/kafka.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + - prereq: + - x509: /etc/pki/kafka.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_crt: + x509.certificate_managed: + - name: /etc/pki/kafka.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: 
/etc/pki/kafka.key kafka_key_perms: file.managed: - replace: False @@ -832,6 +881,51 @@ kafka_crt_perms: - user: 960 - group: 939 +kafka_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p12 + - mode: 640 + - user: 960 + - group: 939 + +{% endif %} +{# For automated testing standalone will need kafka-logstash key to pull logs from Kafka #} +{% if grains['role'] == 'so-standalone' %} +kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} + - prereq: + - x509: /etc/pki/kafka-logstash.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka-logstash.key + kafka_logstash_key_perms: file.managed: - replace: False @@ -855,39 +949,8 @@ kafka_logstash_pkcs12_perms: - mode: 640 - user: 960 - group: 931 - -elasticfleet_kafka_pkcs8_perms: - file.managed: - - replace: False - - name: /etc/pki/elasticfleet-kafka.p8 - - mode: 640 - - user: 960 - - group: 939 - -kafka_pkcs12_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.p12 - - mode: 640 - - user: 960 - - group: 939 - -elasticfleet_kafka_cert_perms: - file.managed: - - replace: False - - name: /etc/pki/elasticfleet-kafka.crt - - mode: 640 - - user: 960 - - group: 939 - -elasticfleet_kafka_key_perms: - file.managed: 
- - replace: False - - name: /etc/pki/elasticfleet-kafka.key - - mode: 640 - - user: 960 - - group: 939 {% endif %} + {% else %} {{sls}}_state_not_allowed: From 58ebbfba206a314b217fa2cf9133609d1aae6c9c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 13:03:14 -0400 Subject: [PATCH 484/777] Add kafka state to standalone highstate Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/top.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/top.sls b/salt/top.sls index e4cd067c3..464f23ea5 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -141,6 +141,7 @@ base: - utility - elasticfleet - stig + - kafka '*_searchnode and G@saltversion:{{saltversion}}': - match: compound From 47ced60243d86337cf7e0e6d3133b10fb985bbd8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 14:49:51 -0400 Subject: [PATCH 485/777] Create new Kafka output policy using salt Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../tools/sbin_jinja/so-elastic-fleet-setup | 13 +---------- salt/kafka/elasticfleet.sls | 22 ++++++++++++++++++ salt/kafka/init.sls | 1 + .../sbin_jinja/so-kafka-fleet-output-policy | 23 +++++++++++++++++++ 4 files changed, 47 insertions(+), 12 deletions(-) create mode 100644 salt/kafka/elasticfleet.sls create mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index aacc3ebc8..02624d813 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -79,18 +79,7 @@ printf "\n\n" printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n" {% if grains.role not in ['so-import', 'so-eval'] %} -KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) -KAFKAKEY=$(openssl rsa -in 
/etc/pki/elasticfleet-kafka.key) -KAFKACA=$(openssl x509 -in $INTCA) -KAFKA_OUTPUT_VERSION="2.6.0" -JSON_STRING=$( jq -n \ - --arg KAFKACRT "$KAFKACRT" \ - --arg KAFKAKEY "$KAFKAKEY" \ - --arg KAFKACA "$KAFKACA" \ - --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ - '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-topic","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-topic"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' - ) -curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +salt-call state.apply kafka.elasticfleet queue=True {% endif %} # Add Manager Hostname & URL Base to Fleet Host URLs diff --git a/salt/kafka/elasticfleet.sls b/salt/kafka/elasticfleet.sls new file mode 100644 index 000000000..a91df765b --- /dev/null +++ b/salt/kafka/elasticfleet.sls @@ -0,0 +1,22 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{# Create Kafka output policy if it doesn't exist #} +update_kafka_output_policy_script: + file.managed: + - name: /usr/sbin/so-kafka-fleet-output-policy + - source: salt://kafka/tools/sbin_jinja/so-kafka-fleet-output-policy + - user: root + - mode: 755 + - template: jinja + - defaults: + GLOBALS: {{ GLOBALS }} + +create_kafka_output_policy: + cmd.run: + - name: 'so-kafka-fleet-output-policy > /dev/null 2>&1' + - show_changes: false \ No newline at end of file diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index c4351ebfc..67b66c45d 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -10,6 +10,7 @@ include: {# Run kafka/nodes.sls before Kafka is enabled, so kafka nodes pillar is setup #} {% if grains.role in ['so-manager','so-managersearch', 'so-standalone'] %} - kafka.nodes + - kafka.elasticfleet {% endif %} {% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %} - kafka.enabled diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy b/salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy new file mode 100644 index 000000000..13f158bdd --- /dev/null +++ b/salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +output=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id) + +if ! 
echo "$output" | grep -q "so-manager_kafka"; then + KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) + KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) + KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) + KAFKA_OUTPUT_VERSION="2.6.0" + JSON_STRING=$( jq -n \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACA "$KAFKACA" \ + --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ + --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ + '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-topic","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-topic"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + ) + curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" 2&1> /dev/null +fi \ No newline at end of file From 3285ae93665990562ab58ada8ceeb09e98fd1c84 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 May 2024 20:11:56 +0000 Subject: [PATCH 486/777] Update mappings for detection fields --- .../component/so/detection-mappings.json | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/templates/component/so/detection-mappings.json b/salt/elasticsearch/templates/component/so/detection-mappings.json index 105a0ead2..5e51b872b 100644 --- a/salt/elasticsearch/templates/component/so/detection-mappings.json +++ 
b/salt/elasticsearch/templates/component/so/detection-mappings.json @@ -20,10 +20,12 @@ "so_detection": { "properties": { "publicId": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "title": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "severity": { "ignore_above": 1024, @@ -36,6 +38,18 @@ "description": { "type": "text" }, + "category": { + "ignore_above": 1024, + "type": "keyword" + }, + "product": { + "ignore_above": 1024, + "type": "keyword" + }, + "service": { + "ignore_above": 1024, + "type": "keyword" + }, "content": { "type": "text" }, @@ -49,7 +63,8 @@ "type": "boolean" }, "tags": { - "type": "text" + "ignore_above": 1024, + "type": "keyword" }, "ruleset": { "ignore_above": 1024, @@ -136,4 +151,4 @@ "_meta": { "ecs_version": "1.12.2" } -} \ No newline at end of file +} From 1be3e6204d2456717d73d65e096207c5d5a15873 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 2 May 2024 10:38:56 -0400 Subject: [PATCH 487/777] FIX: Improve File dashboard #12914 --- salt/soc/defaults.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index b75263fa1..506c85ba5 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1732,16 +1732,16 @@ soc: - name: Host Registry Changes description: Windows Registry changes query: 'event.category: registry | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby event.dataset event.action | groupby process.executable | groupby registry.path | groupby process.executable registry.path' - - name: Host DNS & Process Mappings + - name: Host DNS and Process Mappings description: DNS queries mapped to originating processes query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby host.name | groupby -sankey host.name dns.question.name | groupby dns.question.name | groupby event.dataset event.type | 
groupby process.executable | groupby dns.answers.data' - name: Host Process Activity description: Process activity captured on an endpoint query: 'event.category:process | groupby host.name | groupby -sankey host.name user.name* | groupby user.name | groupby event.dataset event.action | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' - - name: Host File Activity + - name: Host File and Process Mappings description: File activity captured on an endpoint - query: 'event.category: file AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.executable | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name' - - name: Host Network & Process Mappings + query: 'event.category: file AND _exists_:process.name AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.name | groupby process.name | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name' + - name: Host Network and Process Mappings description: Network activity mapped to originating processes query: 'event.category: network AND _exists_:process.executable | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.dataset* event.type* event.action* | groupby dns.question.name | groupby process.executable | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port' - name: Sysmon Overview From 0822a46e94024e685fffcfb6e2eb693c94ec9097 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 2 May 2024 10:42:34 -0400 Subject: [PATCH 488/777] 
FIX: Improve File dashboard #12914 --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 506c85ba5..ad154e9d1 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1739,7 +1739,7 @@ soc: description: Process activity captured on an endpoint query: 'event.category:process | groupby host.name | groupby -sankey host.name user.name* | groupby user.name | groupby event.dataset event.action | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset' - name: Host File and Process Mappings - description: File activity captured on an endpoint + description: File activity mapped to originating processes query: 'event.category: file AND _exists_:process.name AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.name | groupby process.name | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name' - name: Host Network and Process Mappings description: Network activity mapped to originating processes From 33d1170a914b5e787cb25436bc3b306af5cbda3c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 May 2024 11:58:39 -0400 Subject: [PATCH 489/777] add default pillar value for pillarWatch --- salt/salt/engines/master/pillarWatch.py | 153 ++++++++++++++++++++++++ salt/salt/files/engines.conf | 27 +++++ salt/salt/master.sls | 6 + 3 files changed, 186 insertions(+) create mode 100644 salt/salt/engines/master/pillarWatch.py diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py new file mode 100644 index 000000000..f75a6bb6b --- /dev/null +++ 
b/salt/salt/engines/master/pillarWatch.py @@ -0,0 +1,153 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# -*- coding: utf-8 -*- + +import logging +import re +import os +import sys +log = logging.getLogger(__name__) + +# will need this in future versions of this engine +#import salt.client +#local = salt.client.LocalClient() + +def start(fpa, interval=10): + currentPillarValue = '' + previousPillarValue = '' + + ''' + def processJinjaFile(): + log.info("pillarWatch engine: processing jinja file") + log.info(pillarFile) + log.info(__salt__['jinja.load_map'](pillarFile, 'GLOBALMERGED')) + sys.exit(0) + ''' + + def checkChangesTakeAction(): + # if the pillar value changed, then we find what actions we should take + log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue") + if currentPillarValue != previousPillarValue: + log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) + # check if the previous pillar value is defined in the pillar from -> to actions + if previousPillarValue in actions['from']: + # check if the new / current pillar value is defined under to + if currentPillarValue in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # if the new / current pillar value isn't defined under to, is there a wildcard defined + elif '*' in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to']['*'] + # no action was defined for us to take when we see the pillar change + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + # if the previous pillar wasn't defined in the actions from, is there a 
wildcard defined for the pillar that we are changing from + elif '*' in actions['from']: + # is the new pillar value defined for the wildcard match + if currentPillarValue in actions['from']['*']['to']: + ACTIONS=actions['from']['*']['to'][currentPillarValue] + # if the new pillar doesn't have an action, was a wildcard defined + elif '*' in actions['from']['*']['to']: + # need more logic here for to and from + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + log.debug("pillarWatch engine: all defined actions: %s" % actions['from']) + log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) + for action in ACTIONS: + log.info("pillarWatch engine: action: %s" % action) + if action != 'NO DEFINED ACTION FOR US TO TAKE': + for saltModule, args in action.items(): + log.debug("pillarWatch engine: saltModule: %s" % saltModule) + log.debug("pillarWatch engine: args: %s" % args) + #__salt__[saltModule](**args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + + + log.info("pillarWatch engine: ##### checking watched pillars for changes #####") + + # try to open the file that stores the previous runs data + # if the file doesn't exist, create a blank one + try: + # maybe change this location + dataFile = open("/opt/so/state/pillarWatch.txt", "r+") + except FileNotFoundError: + log.warn("pillarWatch engine: No previous pillarWatch data saved") + dataFile = open("/opt/so/state/pillarWatch.txt", "w+") + + df = dataFile.read() + for i in fpa: + log.trace("pillarWatch engine: files: %s" % i['files']) + log.trace("pillarWatch engine: pillar: %s" % i['pillar']) + log.trace("pillarWatch 
engine: actions: %s" % i['actions']) + pillarFiles = i['files'] + pillar = i['pillar'] + default = str(i['default']) + actions = i['actions'] + # these are the keys that we are going to look for as we traverse the pillarFiles + patterns = pillar.split(".") + # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file + for pillarFile in reversed(pillarFiles): + currentPillarValue = default + previousPillarValue = '' + ''' + if 'jinja' in os.path.splitext(pillarFile)[1]: + processJinjaFile() + ''' + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 + with open(pillarFile, "r") as file: + log.debug("pillarWatch engine: checking file: %s" % pillarFile) + for line in file: + log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) + log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) + # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar + # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look + # for pipeline. once pipeline is found, it will record the value + if re.search('^' + patterns[patternFound] + ':', line.strip()): + # strip the newline because it makes the logs u-g-l-y + log.debug("pillarWatch engine: found: %s" % line.strip('\n')) + patternFound += 1 + # we have found the final key in the pillar that we are looking for, get the previous value and current value + if patternFound == len(patterns): + currentPillarValue = str(line.split(":")[1]).strip() + # we have found the pillar so we dont need to loop through the file anymore + break + + # if key and value was found in the first file, then we don't want to look in + # any more files since we use the first file as the source of truth. 
+ if patternFound == len(patterns): + break + + # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars + previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' + # check the contents of the dataFile that stores the previousPillarValue(s). + # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value + for l in df.splitlines(): + if pillar in l: + previousPillarValue = str(l.split(":")[1].strip()) + log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + # if the pillar we are checking for changes has been defined in the dataFile, + # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. + if pillar in df: + df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) + else: + df += pillar + ': ' + currentPillarValue + '\n' + log.trace("pillarWatch engine: df: %s" % df) + if previousPillarValue != "PREVIOUSPILLARVALUENOTSAVEDINDATAFILE": + checkChangesTakeAction() + else: + log.info("pillarWatch engine: %s was not previously tracked. not tacking action." 
% pillar) + + + dataFile.seek(0) + dataFile.write(df) + dataFile.truncate() + dataFile.close() diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 7c43e99e1..4f3bc31a1 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -4,3 +4,30 @@ engines_dirs: engines: - checkmine: interval: 60 + - pillarWatch: + fpa: + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.ruleset + default: ETOPEN + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.oinkcode + default: '' + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + interval: 10 diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e01..6e320e4a6 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -27,6 +27,11 @@ checkmine_engine: - source: salt://salt/engines/master/checkmine.py - makedirs: True +pillarWatch_engine: + file.managed: + - name: /etc/salt/engines/pillarWatch.py + - source: salt://salt/engines/master/pillarWatch.py + engines_config: file.managed: - name: /etc/salt/master.d/engines.conf @@ -38,6 +43,7 @@ salt_master_service: - enable: True - watch: - file: checkmine_engine + - file: pillarWatch_engine - file: engines_config - order: last From de9f6425f9901cc82697ba216484e420e2864578 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 2 May 2024 12:13:46 -0400 Subject: [PATCH 490/777] Automatically switch between Kafka output policy and logstash output policy when globals.pipeline changes Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../so-elastic-fleet-outputs-update | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) 
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 4d2867fc7..064d49d23 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -127,13 +127,27 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then printf "\nHashes match - no update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" + + # Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed + printf "Checking if the correct output policy is set as default\n" + OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON) + if [ "$OUTPUT_DEFAULT" = "false" ]; then + printf "Default output policy needs to be updated.\n" + {%- if GLOBALS.pipeline == "KAFKA" %} + update_kafka_outputs + {%- else %} + update_logstash_outputs + {%- endif %} + else + printf "Default output policy is set - no update needed.\n" + fi exit 0 else printf "\nHashes don't match - update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" -{% if GLOBALS.pipeline == "KAFKA" %} + {%- if GLOBALS.pipeline == "KAFKA" %} update_kafka_outputs -{% else %} + {%- else %} update_logstash_outputs -{% endif %} + {%- endif %} fi From f663ef8c168c880d4c6b06483e1ab5e0a406769c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 2 May 2024 14:53:28 -0400 Subject: [PATCH 491/777] Setup Kafka to use PKCS12 and remove need for converting to JKS Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 14 -------------- salt/kafka/defaults.yaml | 10 +++++----- salt/kafka/enabled.sls | 4 +++- .../tools/sbin_jinja/so-kafka-generate-keystore | 13 ------------- 4 files changed, 8 insertions(+), 33 
deletions(-) delete mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index b1a31d23f..5cf6f8201 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -54,20 +54,6 @@ kafka_data_dir: - group: 960 - makedirs: True -kafka_generate_keystore: - cmd.run: - - name: "/usr/sbin/so-kafka-generate-keystore" - - onchanges: - - x509: /etc/pki/kafka.key - -kafka_keystore_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.jks - - mode: 640 - - user: 960 - - group: 939 - {% for sc in ['server', 'client'] %} kafka_kraft_{{sc}}_properties: file.managed: diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 8dcd70b98..1cf7b73f1 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -25,19 +25,19 @@ kafka: socket_x_receive_x_buffer_x_bytes: 102400 socket_x_request_x_max_x_bytes: 104857600 socket_x_send_x_buffer_x_bytes: 102400 - ssl_x_keystore_x_location: /etc/pki/kafka.jks + ssl_x_keystore_x_location: /etc/pki/kafka.p12 + ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: changeit - ssl_x_keystore_x_type: JKS ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts ssl_x_truststore_x_password: changeit - transaction_x_state_x_log_x_min_x_isr: 1 + transaction_x_state_x_log_x_min_x_isr: 1n transaction_x_state_x_log_x_replication_x_factor: 1 client: security_x_protocol: SSL ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts ssl_x_truststore_x_password: changeit - ssl_x_keystore_x_location: /etc/pki/kafka.jks - ssl_x_keystore_x_type: JKS + ssl_x_keystore_x_location: /etc/pki/kafka.p12 + ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: changeit controller: controller_x_listener_x_names: CONTROLLER diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 78e0d87d9..9275eca91 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -40,11 +40,13 @@ so-kafka: - {{ BINDING }} {% endfor %} - binds: - - 
/etc/pki/kafka.jks:/etc/pki/kafka.jks + - /etc/pki/kafka.p12:/etc/pki/kafka.p12 - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + {% if GLOBALS.is_manager %} - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties + {% endif %} - watch: {% for sc in ['server', 'client'] %} - file: kafka_kraft_{{sc}}_properties diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore deleted file mode 100644 index 8ae9d6db2..000000000 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -. /usr/sbin/so-common - -# Generate a new keystore -docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt -docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks -docker rm so-kafka-keystore \ No newline at end of file From 093cbc5ebcfc59f70f9e6860006e802ce61fa7fc Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 2 May 2024 15:10:13 -0400 Subject: [PATCH 492/777] Reconfigure Kafka defaults - Set default number of partitions per topic -> 3. 
Helps ensure that out of the box we can take advantage of multi-node Kafka clusters via load balancing across atleast 3 brokers. Also multiple searchnodes will be able to pull from each topic. In this case 3 searchnodes (consumers) would be able to pull from all topics concurrently. - Set default replication factor -> 2. This is the minimum value required for redundancy. Every partition will have 1 replica. In this case if we have 2 brokers each topic will have 3 partitions (load balanced across brokers) and each partition will have a replica on separate broker for redundancy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 5 +++-- salt/kafka/soc_kafka.yaml | 5 +++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 1cf7b73f1..86d2f6e94 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -8,6 +8,7 @@ kafka: advertised_x_listeners: auto_x_create_x_topics_x_enable: true controller_x_quorum_x_voters: + default_x_replication_x_factor: 2 inter_x_broker_x_listener_x_name: BROKER listeners: BROKER://0.0.0.0:9092 listener_x_security_x_protocol_x_map: BROKER:SSL @@ -18,7 +19,7 @@ kafka: node_x_id: num_x_io_x_threads: 8 num_x_network_x_threads: 3 - num_x_partitions: 1 + num_x_partitions: 3 num_x_recovery_x_threads_x_per_x_data_x_dir: 1 offsets_x_topic_x_replication_x_factor: 1 process_x_roles: broker @@ -30,7 +31,7 @@ kafka: ssl_x_keystore_x_password: changeit ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts ssl_x_truststore_x_password: changeit - transaction_x_state_x_log_x_min_x_isr: 1n + transaction_x_state_x_log_x_min_x_isr: 1 transaction_x_state_x_log_x_replication_x_factor: 1 client: security_x_protocol: SSL diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 505469d6b..ba673fa68 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -26,6 +26,11 @@ kafka: title: 
auto.create.topics.enable forcedType: bool helpLink: kafka.html + default_x_replication_x_factor: + description: The default replication factor for automatically created topics. + title: default.replication.factor + forcedType: int + helpLink: kafka.html inter_x_broker_x_listener_x_name: description: The name of the listener used for inter-broker communication. title: inter.broker.listener.name From 3b2d3573d8e7e9b063770efaee2f119946074a1d Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 2 May 2024 16:06:04 -0400 Subject: [PATCH 493/777] Update pillarWatch.py --- salt/salt/engines/master/pillarWatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index f75a6bb6b..48d364374 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -70,7 +70,7 @@ def start(fpa, interval=10): log.info("pillarWatch engine: actionReturn: %s" % actionReturn) - log.info("pillarWatch engine: ##### checking watched pillars for changes #####") + log.debug("pillarWatch engine: ##### checking watched pillars for changes #####") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one From e9b12632495db01340aff639fb8f1ff01526f8e1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 May 2024 16:32:43 -0400 Subject: [PATCH 494/777] orchestate searchnode deployment --- salt/manager/tools/sbin/so-minion | 11 +++++++---- salt/orch/deploy_searchnode.sls | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 salt/orch/deploy_searchnode.sls diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 34e069ece..8a34ddca0 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -613,11 +613,14 @@ function updateMineAndApplyStates() { if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then 
# calls so-common and set_minionid sets MINIONID to local minion id set_minionid - salt $MINIONID state.apply elasticsearch queue=True --async - salt $MINIONID state.apply soc queue=True --async + #salt $MINIONID state.apply elasticsearch queue=True --async + # salt $MINIONID state.apply soc queue=True --async + # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured + salt-run state.orch orch.deploy_searchnode pillar="{'setup': {'manager': $MINIONID, 'searchnode': $MINION_ID }}" + else + # run this async so the cli doesn't wait for a return + salt "$MINION_ID" state.highstate --async queue=True fi - # run this async so the cli doesn't wait for a return - salt "$MINION_ID" state.highstate --async queue=True } function setupMinionFiles() { diff --git a/salt/orch/deploy_searchnode.sls b/salt/orch/deploy_searchnode.sls new file mode 100644 index 000000000..a70e7bdbf --- /dev/null +++ b/salt/orch/deploy_searchnode.sls @@ -0,0 +1,16 @@ +{% set MANAGER = salt['pillar.get']('setup:manager') %} +{% set SEARCHNODE = salt['pillar.get']('setup:searchnode') %} + +manager_run_es_soc: + salt.state: + - tgt: {{ MANAGER }} + - sls: + - elasticsearch + - soc + +searchnode_run_highstate: + salt.state: + - tgt: {{ TARGET }} + - highstate: True + - require: + - salt: manager_run_es_soc From 29298770422c02d34ee30853d08649c0c334bf78 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 May 2024 16:37:54 -0400 Subject: [PATCH 495/777] fix var --- salt/orch/deploy_searchnode.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/orch/deploy_searchnode.sls b/salt/orch/deploy_searchnode.sls index a70e7bdbf..f36f02511 100644 --- a/salt/orch/deploy_searchnode.sls +++ b/salt/orch/deploy_searchnode.sls @@ -10,7 +10,7 @@ manager_run_es_soc: searchnode_run_highstate: salt.state: - - tgt: {{ TARGET }} + - tgt: {{ SEARCHNODE }} - highstate: True - require: - salt: manager_run_es_soc From 
5fe8c6a95f587fa89e61d44a55fd1d844c0eb02d Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 3 May 2024 09:38:34 -0400 Subject: [PATCH 496/777] Update so-whiptail to make installation screen more consistent --- setup/so-whiptail | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 90bbaf397..4be002565 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -592,8 +592,8 @@ whiptail_install_type() { "IMPORT" "Import PCAP or log files " \ "EVAL" "Evaluation mode (not for production) " \ "STANDALONE" "Standalone production install " \ - "DISTRIBUTED" "Distributed install submenu " \ - "DESKTOP" "Install Security Onion Desktop" \ + "DISTRIBUTED" "Distributed deployment " \ + "DESKTOP" "Security Onion Desktop" \ 3>&1 1>&2 2>&3 ) elif [[ "$OSVER" == "focal" ]]; then From bbc374b56ea0a324d926184cfb8c666190005756 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 09:56:52 -0400 Subject: [PATCH 497/777] add logic in orch --- salt/manager/tools/sbin/so-minion | 16 +++++++------- salt/orch/deploy_newnode.sls | 36 +++++++++++++++++++++++++++++++ salt/orch/deploy_searchnode.sls | 16 -------------- 3 files changed, 44 insertions(+), 24 deletions(-) create mode 100644 salt/orch/deploy_newnode.sls delete mode 100644 salt/orch/deploy_searchnode.sls diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 8a34ddca0..3f8adfa31 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -607,20 +607,20 @@ function updateMineAndApplyStates() { # tell the minion to populate the mine with data from mine_functions which is populated during setup # this only needs to happen on non managers since they handle this during setup # and they need to wait for ca creation to update the mine - updateMine - checkMine "network.ip_addrs" + #updateMine + #checkMine "network.ip_addrs" # apply the elasticsearch state to the manager if a new searchnode was added - if 
[[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then + #if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then # calls so-common and set_minionid sets MINIONID to local minion id - set_minionid + set_minionid #salt $MINIONID state.apply elasticsearch queue=True --async # salt $MINIONID state.apply soc queue=True --async # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured - salt-run state.orch orch.deploy_searchnode pillar="{'setup': {'manager': $MINIONID, 'searchnode': $MINION_ID }}" - else + salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" + #else # run this async so the cli doesn't wait for a return - salt "$MINION_ID" state.highstate --async queue=True - fi + # salt "$MINION_ID" state.highstate --async queue=True + #fi } function setupMinionFiles() { diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls new file mode 100644 index 000000000..94ed86723 --- /dev/null +++ b/salt/orch/deploy_newnode.sls @@ -0,0 +1,36 @@ +{% set MANAGER = salt['pillar.get']('setup:manager') %} +{% set NEWNODE = salt['pillar.get']('setup:newnode') %} + +{{NEWNODE}}_update_mine: + salt.function: + - name: mine.update + - tgt: {{ NEWNODE }} + - retry: + attempts: 24 + interval: 5 + +{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} +manager_run_es_soc: + salt.state: + - tgt: {{ MANAGER }} + - sls: + - elasticsearch + - soc + - kwarg: + queue: True + - retry: + attempts: 30 + interval: 10 + - require: + - salt: new_node_update_mine +{% endif %} + +{{NEWNODE}}_run_highstate: + salt.state: + - tgt: {{ NEWNODE }} + - highstate: True + - kwarg: + queue: True + - retry: + attempts: 30 + interval: 10 diff --git a/salt/orch/deploy_searchnode.sls b/salt/orch/deploy_searchnode.sls deleted file mode 100644 index f36f02511..000000000 --- a/salt/orch/deploy_searchnode.sls +++ /dev/null @@ -1,16 +0,0 @@ -{% set MANAGER = 
salt['pillar.get']('setup:manager') %} -{% set SEARCHNODE = salt['pillar.get']('setup:searchnode') %} - -manager_run_es_soc: - salt.state: - - tgt: {{ MANAGER }} - - sls: - - elasticsearch - - soc - -searchnode_run_highstate: - salt.state: - - tgt: {{ SEARCHNODE }} - - highstate: True - - require: - - salt: manager_run_es_soc From fa3522a2333a25eb8eb63fabbfd52c178c9b466b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 11:10:21 -0400 Subject: [PATCH 498/777] fix requirement --- salt/orch/deploy_newnode.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index 94ed86723..bf4a67574 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -22,7 +22,7 @@ manager_run_es_soc: attempts: 30 interval: 10 - require: - - salt: new_node_update_mine + - salt: {{NEWNODE}}_update_mine {% endif %} {{NEWNODE}}_run_highstate: From 442a717d75f071fff0c7e7aebc3e8bb50692209d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 12:08:57 -0400 Subject: [PATCH 499/777] orchit --- salt/orch/deploy_newnode.sls | 23 +++++++++++++++-------- setup/so-setup | 1 - 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index bf4a67574..182fff06c 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -16,11 +16,10 @@ manager_run_es_soc: - sls: - elasticsearch - soc - - kwarg: - queue: True + - queue: True - retry: - attempts: 30 - interval: 10 + attempts: 3 + interval: 60 - require: - salt: {{NEWNODE}}_update_mine {% endif %} @@ -29,8 +28,16 @@ manager_run_es_soc: salt.state: - tgt: {{ NEWNODE }} - highstate: True - - kwarg: - queue: True + - queue: True - retry: - attempts: 30 - interval: 10 + attempts: 5 + interval: 60 + +{{NEWNODE}}_set_highstate_cron: + salt.state: + - tgt: {{ NEWNODE }} + - sls: + - setup.highstate_cron + - queue: True + - onfail: + - salt: 
{{NEWNODE}}_run_highstate diff --git a/setup/so-setup b/setup/so-setup index 1c3be22bf..8a1879c58 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -808,7 +808,6 @@ if ! [[ -f $install_opt_file ]]; then configure_minion "$minion_type" check_sos_appliance drop_install_options - logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/" verify_setup fi From 6cbbb81cadeb4712dcb74c0f03fae02d171fcdf7 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 3 May 2024 12:59:41 -0400 Subject: [PATCH 500/777] FEATURE: Add hyperlink to airgap screen in setup #12925 --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 4be002565..06d62a027 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -14,7 +14,7 @@ whiptail_airgap() { [[ $is_manager || $is_import ]] && node_str='manager' INTERWEBS=$(whiptail --title "$whiptail_title" --menu \ - "How should this $node_str be installed?" 10 70 2 \ + "How should this $node_str be installed?\n\nFor more information, please see:\n$DOC_BASE_URL/airgap.html" 13 70 2 \ "Standard " "This $node_str has access to the Internet" \ "Airgap " "This $node_str does not have access to the Internet" 3>&1 1>&2 2>&3 ) From 3d4fd59a159901d3deb34381fbff98e88d1d953d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 13:48:51 -0400 Subject: [PATCH 501/777] orchit --- salt/manager/tools/sbin/so-minion | 21 +++------------------ salt/orch/deploy_newnode.sls | 6 +++++- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 3f8adfa31..e31ec87d3 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -426,10 +426,6 @@ function checkMine() { } -function updateMine() { - retry 20 1 "salt '$MINION_ID' mine.update" True -} - function createEVAL() { is_pcaplimit=true pcapspace @@ -604,23 +600,12 @@ function addMinion() { } function 
updateMineAndApplyStates() { - # tell the minion to populate the mine with data from mine_functions which is populated during setup - # this only needs to happen on non managers since they handle this during setup - # and they need to wait for ca creation to update the mine - #updateMine + #checkMine "network.ip_addrs" - # apply the elasticsearch state to the manager if a new searchnode was added - #if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then - # calls so-common and set_minionid sets MINIONID to local minion id + # calls so-common and set_minionid sets MINIONID to local minion id set_minionid - #salt $MINIONID state.apply elasticsearch queue=True --async - # salt $MINIONID state.apply soc queue=True --async - # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured + # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" - #else - # run this async so the cli doesn't wait for a return - # salt "$MINION_ID" state.highstate --async queue=True - #fi } function setupMinionFiles() { diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index 182fff06c..a2e6b147f 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -1,14 +1,18 @@ {% set MANAGER = salt['pillar.get']('setup:manager') %} {% set NEWNODE = salt['pillar.get']('setup:newnode') %} +# tell the minion to populate the mine with data from mine_functions which is populated during setup +# this only needs to happen on non managers since they handle this during setup +# and they need to wait for ca creation to update the mine {{NEWNODE}}_update_mine: salt.function: - name: mine.update - tgt: {{ NEWNODE }} - retry: - attempts: 24 + attempts: 36 interval: 5 +# we need to prepare the manager for a new searchnode or heavynode {% if 
NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} manager_run_es_soc: salt.state: From bdf1b45a07252a03b31287bb2a86114ed59514df Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 14:54:44 -0400 Subject: [PATCH 502/777] redirect and throw in bg --- salt/manager/tools/sbin/so-minion | 2 +- salt/orch/deploy_newnode.sls | 15 --------------- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index e31ec87d3..79eea59fe 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -605,7 +605,7 @@ function updateMineAndApplyStates() { # calls so-common and set_minionid sets MINIONID to local minion id set_minionid # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured - salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" + salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 & } function setupMinionFiles() { diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index a2e6b147f..c05a812a3 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -21,9 +21,6 @@ manager_run_es_soc: - elasticsearch - soc - queue: True - - retry: - attempts: 3 - interval: 60 - require: - salt: {{NEWNODE}}_update_mine {% endif %} @@ -33,15 +30,3 @@ manager_run_es_soc: - tgt: {{ NEWNODE }} - highstate: True - queue: True - - retry: - attempts: 5 - interval: 60 - -{{NEWNODE}}_set_highstate_cron: - salt.state: - - tgt: {{ NEWNODE }} - - sls: - - setup.highstate_cron - - queue: True - - onfail: - - salt: {{NEWNODE}}_run_highstate From 7f12d4c81589792f138ca8c3d07e60b2b8ed6f54 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 3 May 2024 15:22:53 -0400 Subject: [PATCH 503/777] Exclude new sigma rules --- setup/so-verify | 1 + 1 file changed, 1 
insertion(+) diff --git a/setup/so-verify b/setup/so-verify index b4c79a88c..d22b80fc2 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -67,6 +67,7 @@ log_has_errors() { grep -vE "Reading first line of patchfile" | \ grep -vE "Command failed with exit code" | \ grep -vE "Running scope as unit" | \ + grep -vE "securityonion-resources/sigma/stable" | \ grep -vE "log-.*-pipeline_failed_attempts" &> "$error_log" if [[ $? -eq 0 ]]; then From 7b905f5a946b3408775863bf5ad1e216ed3f9454 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 6 May 2024 08:22:08 -0400 Subject: [PATCH 504/777] FEATURE: Add Events table columns for tunnel logs #12937 --- salt/soc/defaults.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ad154e9d1..f2bf77805 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -502,16 +502,15 @@ soc: - syslog.severity - log.id.uid - event.dataset - '::tunnels': + '::tunnel': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - - tunnel_type - - action - - log.id.uid - - event.dataset + - event.action + - tunnel.type '::weird': - soc_timestamp - source.ip From 26c6a98b45369e522d3a396b7e92623a0a81eb6c Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 6 May 2024 08:43:01 -0400 Subject: [PATCH 505/777] Initial airgap support for detections --- salt/soc/soc_soc.yaml | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 4b88a5f84..a9d6bac08 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -107,7 +107,7 @@ soc: advanced: True helpLink: sigma.html rulesRepos: - description: 'Custom Git repos to pull Sigma rules from. License field is required, folder is optional.' + description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. 
"community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' global: True advanced: True forcedType: "[]{}" @@ -117,8 +117,8 @@ soc: global: True advanced: False helpLink: sigma.html - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false.' + airgapEnabled: + description: 'This setting dynamically changes to the current status of Airgap on this system and is used during the Sigma ruleset update process.' global: True advanced: True helpLink: sigma.html @@ -185,31 +185,27 @@ soc: advanced: True strelkaengine: allowRegex: - description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' + description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.' global: True advanced: True helpLink: yara.html - autoEnabledYaraRules: - description: 'Yara rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' + autoEnabledYARARules: + description: 'YARA rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' global: True advanced: True helpLink: sigma.html - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.' - global: True - advanced: True denyRegex: - description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' + description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.' 
global: True advanced: True helpLink: yara.html communityRulesImportFrequencySeconds: - description: 'How often to check for new Yara rules (in seconds). This applies to both Community Rules and any configured Git repos.' + description: 'How often to check for new YARA rules (in seconds). This applies to both Community Rules and any configured Git repos.' global: True advanced: True helpLink: yara.html rulesRepos: - description: 'Custom Git repos to pull Yara rules from. License field is required' + description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' global: True advanced: True forcedType: "[]{}" @@ -220,10 +216,6 @@ soc: global: True advanced: True helpLink: suricata.html - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Suricata rulesets. If this is an Airgap system, this setting will be overridden and set to false.' - global: True - advanced: True denyRegex: description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' 
global: True From f689cfcd0ae8d44a335b9fe313aa26e49cf2a83f Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 6 May 2024 08:52:43 -0400 Subject: [PATCH 506/777] FEATURE: Add Events table columns for stun logs #12940 --- salt/soc/defaults.yaml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f2bf77805..593b55b07 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -491,6 +491,17 @@ soc: - ssl.version - log.id.uid - event.dataset + '::stun': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - stun.class + - stun.method + - stun.attribute.types + - log.id.uid ':zeek:syslog': - soc_timestamp - source.ip @@ -1841,7 +1852,7 @@ soc: query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: STUN description: STUN (Session Traversal Utilities for NAT) network metadata - query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset' + query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby stun.class | groupby -sankey stun.class stun.method | groupby stun.method | groupby stun.attribute.types' - name: Syslog description: Syslog logs query: 'tags:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby event.dataset' From 3f73b14a6a8ff3fa7682ef9e9c180d5ad21ca9fc Mon Sep 
17 00:00:00 2001 From: Doug Burks Date: Mon, 6 May 2024 09:20:47 -0400 Subject: [PATCH 507/777] FEATURE: Add event.dataset to all Events table layouts #12641 --- salt/soc/defaults.yaml | 237 +++++++++++++++++++++-------------------- 1 file changed, 121 insertions(+), 116 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 593b55b07..b6a52fd75 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -81,22 +81,23 @@ soc: eventFields: default: - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - log.id.uid - network.community_id - - event.dataset ':kratos:': - soc_timestamp + - event.dataset - http_request.headers.x-real-ip - identity_id - http_request.headers.user-agent - - event.dataset - msg '::conn': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -105,9 +106,9 @@ soc: - network.protocol - log.id.uid - network.community_id - - event.dataset '::dce_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -116,27 +117,27 @@ soc: - dce_rpc.named_pipe - dce_rpc.operation - log.id.uid - - event.dataset '::dhcp': - soc_timestamp + - event.dataset - client.address - server.address - host.domain - host.hostname - dhcp.message_types - log.id.uid - - event.dataset '::dnp3': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - dnp3.fc_reply - log.id.uid - - event.dataset '::dnp3_control': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -144,9 +145,9 @@ soc: - dnp3.function_code - dnp3.block_type - log.id.uid - - event.dataset '::dnp3_objects': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -154,9 +155,9 @@ soc: - dnp3.function_code - dnp3.object_type - log.id.uid - - event.dataset '::dns': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -167,9 +168,9 @@ soc: - dns.response.code_name - 
log.id.uid - network.community_id - - event.dataset '::dpd': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -178,9 +179,9 @@ soc: - observer.analyser - error.reason - log.id.uid - - event.dataset '::file': - soc_timestamp + - event.dataset - source.ip - destination.ip - file.name @@ -189,9 +190,9 @@ soc: - file.bytes.total - log.id.fuid - log.id.uid - - event.dataset '::ftp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -202,9 +203,9 @@ soc: - ftp.reply_code - file.size - log.id.uid - - event.dataset '::http': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -217,9 +218,9 @@ soc: - http.response.body.length - log.id.uid - network.community_id - - event.dataset '::intel': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -228,9 +229,9 @@ soc: - intel.indicator_type - intel.seen_where - log.id.uid - - event.dataset '::irc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -241,9 +242,9 @@ soc: - irc.command.value - irc.command.info - log.id.uid - - event.dataset '::kerberos': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -252,18 +253,18 @@ soc: - kerberos.service - kerberos.request_type - log.id.uid - - event.dataset '::modbus': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - modbus.function - log.id.uid - - event.dataset '::mysql': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -273,9 +274,9 @@ soc: - mysql.success - mysql.response - log.id.uid - - event.dataset '::notice': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -285,9 +286,9 @@ soc: - log.id.fuid - log.id.uid - network.community_id - - event.dataset '::ntlm': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -298,18 +299,18 @@ soc: - ntlm.server.nb.name - 
ntlm.server.tree.name - log.id.uid - - event.dataset '::pe': - soc_timestamp + - event.dataset - file.is_64bit - file.is_exe - file.machine - file.os - file.subsystem - log.id.fuid - - event.dataset '::radius': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -319,9 +320,9 @@ soc: - radius.framed_address - radius.reply_message - radius.result - - event.dataset '::rdp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -335,9 +336,9 @@ soc: - rdp.result - rdp.security_protocol - log.id.uid - - event.dataset '::rfb': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -347,9 +348,9 @@ soc: - rfb.share_flag - rfb.desktop.name - log.id.uid - - event.dataset '::signatures': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -361,9 +362,9 @@ soc: - signature_count - host.count - log.id.uid - - event.dataset '::sip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -379,9 +380,9 @@ soc: - sip.user_agent - sip.status_code - log.id.uid - - event.dataset '::smb_files': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -393,9 +394,9 @@ soc: - file.size - file.prev_name - log.id.uid - - event.dataset '::smb_mapping': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -404,9 +405,9 @@ soc: - smb.service - smb.share_type - log.id.uid - - event.dataset '::smtp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -417,9 +418,9 @@ soc: - smtp.useragent - log.id.uid - network.community_id - - event.dataset '::snmp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -427,9 +428,9 @@ soc: - snmp.community - snmp.version - log.id.uid - - event.dataset '::socks': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -439,15 +440,15 @@ soc: - socks.request.port - socks.status - log.id.uid - - 
event.dataset '::software': - soc_timestamp + - event.dataset - source.ip - software.name - software.type - - event.dataset '::ssh': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -458,9 +459,9 @@ soc: - ssh.client - ssh.server - log.id.uid - - event.dataset ':suricata:ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -469,9 +470,9 @@ soc: - ssl.certificate.subject - ssl.version - log.id.uid - - event.dataset ':zeek:ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -480,9 +481,9 @@ soc: - ssl.validation_status - ssl.version - log.id.uid - - event.dataset '::ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -490,7 +491,6 @@ soc: - ssl.server_name - ssl.version - log.id.uid - - event.dataset '::stun': - soc_timestamp - event.dataset @@ -504,6 +504,7 @@ soc: - log.id.uid ':zeek:syslog': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -512,7 +513,6 @@ soc: - network.protocol - syslog.severity - log.id.uid - - event.dataset '::tunnel': - soc_timestamp - event.dataset @@ -524,23 +524,24 @@ soc: - tunnel.type '::weird': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - weird.name - log.id.uid - - event.dataset '::x509': - soc_timestamp + - event.dataset - x509.certificate.subject - x509.certificate.key.type - x509.certificate.key.length - x509.certificate.issuer - log.id.fuid - - event.dataset '::firewall': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -550,9 +551,9 @@ soc: - observer.ingress.interface.name - event.action - network.community_id - - event.dataset ':pfsense:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -562,9 +563,9 @@ soc: - observer.ingress.interface.name - event.action - network.community_id - - event.dataset ':osquery:': - soc_timestamp + - event.dataset - source.ip 
- source.port - destination.ip @@ -572,27 +573,27 @@ soc: - source.hostname - process.executable - user.name - - event.dataset ':strelka:': - soc_timestamp + - event.dataset - file.name - file.size - hash.md5 - file.source - file.mime_type - log.id.fuid - - event.dataset ':strelka:file': - soc_timestamp + - event.dataset - file.name - file.size - hash.md5 - file.source - file.mime_type - log.id.fuid - - event.dataset ':suricata:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -602,35 +603,35 @@ soc: - event.severity_label - log.id.uid - network.community_id - - event.dataset ':windows_eventlog:': - soc_timestamp - - user.name - event.dataset + - user.name ':elasticsearch:': - soc_timestamp + - event.dataset - agent.name - message - log.level - metadata.version - metadata.pipeline - - event.dataset ':kibana:': - soc_timestamp + - event.dataset - host.name - message - kibana.log.meta.req.headers.x-real-ip - - event.dataset ':syslog:syslog': - soc_timestamp + - event.dataset - host.name - metadata.ip_address - real_message - syslog.priority - syslog.application - - event.dataset ':aws:': - soc_timestamp + - event.dataset - aws.cloudtrail.event_category - aws.cloudtrail.event_type - event.provider @@ -640,25 +641,25 @@ soc: - user.name - source.ip - source.geo.region_iso_code - - event.dataset ':squid:': - soc_timestamp + - event.dataset - url.original - destination.ip - destination.geo.country_iso_code - user.name - source.ip - - event.dataset '::sysmon_operational': - soc_timestamp + - event.dataset - event.action - winlog.computer_name - user.name - process.executable - process.pid - - event.dataset '::network_connection': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -666,59 +667,59 @@ soc: - source.hostname - process.executable - user.name - - event.dataset '::process_terminated': - soc_timestamp + - event.dataset - process.executable - process.pid - winlog.computer_name - - event.dataset 
'::file_create': - soc_timestamp + - event.dataset - file.target - process.executable - process.pid - winlog.computer_name - - event.dataset '::registry_value_set': - soc_timestamp + - event.dataset - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name - - event.dataset '::process_creation': - soc_timestamp + - event.dataset - process.command_line - process.pid - process.parent.executable - process.working_directory - - event.dataset '::registry_create_delete': - soc_timestamp + - event.dataset - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name - - event.dataset '::dns_query': - soc_timestamp + - event.dataset - dns.query.name - dns.answers.name - process.executable - winlog.computer_name - - event.dataset '::file_create_stream_hash': - soc_timestamp + - event.dataset - file.target - hash.md5 - hash.sha256 - process.executable - process.pid - winlog.computer_name - - event.dataset '::bacnet': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -726,9 +727,9 @@ soc: - bacnet.bclv.function - bacnet.result.code - log.id.uid - - event.dataset '::bacnet_discovery': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -736,9 +737,9 @@ soc: - bacnet.vendor - bacnet.pdu.service - log.id.uid - - event.dataset '::bacnet_property': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -746,9 +747,9 @@ soc: - bacnet.property - bacnet.pdu.service - log.id.uid - - event.dataset '::bsap_ip_header': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -756,16 +757,16 @@ soc: - bsap.message.type - bsap.number.messages - log.id.uid - - event.dataset '::bsap_ip_rdb': - soc_timestamp + - event.dataset - bsap.application.function - bsap.application.sub.function - bsap.vector.variables - log.id.uid - - event.dataset '::bsap_serial_header': - soc_timestamp + - event.dataset - source.ip - source.port - 
destination.ip @@ -774,15 +775,15 @@ soc: - bsap.destination.function - bsap.message.type - log.id.uid - - event.dataset '::bsap_serial_rdb': - soc_timestamp + - event.dataset - bsap.rdb.function - bsap.vector.variables - log.id.uid - - event.dataset '::cip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -790,9 +791,9 @@ soc: - cip.service - cip.status_code - log.id.uid - - event.dataset '::cip_identity': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -800,9 +801,9 @@ soc: - cip.device.type.name - cip.vendor.name - log.id.uid - - event.dataset '::cip_io': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -810,63 +811,63 @@ soc: - cip.connection.id - cip.io.data - log.id.uid - - event.dataset '::cotp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - cotp.pdu.name - log.id.uid - - event.dataset '::ecat_arp_info': - soc_timestamp + - event.dataset - source.ip - destination.ip - source.mac - destination.mac - ecat.arp.type - - event.dataset '::ecat_aoe_info': - soc_timestamp + - event.dataset - source.mac - source.port - destination.mac - destination.port - ecat.command - - event.dataset '::ecat_coe_info': - soc_timestamp + - event.dataset - ecat.message.number - ecat.message.type - ecat.request.response.type - ecat.index - ecat.sub.index - - event.dataset '::ecat_dev_info': - soc_timestamp + - event.dataset - ecat.device.type - ecat.features - ecat.ram.size - ecat.revision - ecat.slave.address - - event.dataset '::ecat_log_address': - soc_timestamp + - event.dataset - source.mac - destination.mac - ecat.command - - event.dataset '::ecat_registers': - soc_timestamp + - event.dataset - source.mac - destination.mac - ecat.command - ecat.register.type - - event.dataset '::enip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -874,18 +875,18 @@ soc: - enip.command - enip.status_code - log.id.uid 
- - event.dataset '::modbus_detailed': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - modbus.function - log.id.uid - - event.dataset '::opcua_binary': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -893,9 +894,9 @@ soc: - opcua.identifier_string - opcua.message_type - log.id.uid - - event.dataset '::opcua_binary_activate_session': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -904,9 +905,9 @@ soc: - opcua.identifier_string - opcua.user_name - log.id.uid - - event.dataset '::opcua_binary_activate_session_diagnostic_info': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -914,9 +915,9 @@ soc: - opcua.activate_session_diag_info_link_id - opcua.diag_info_link_id - log.id.uid - - event.dataset '::opcua_binary_activate_session_locale_id': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -924,9 +925,9 @@ soc: - opcua.local_id - opcua.locale_link_id - log.id.uid - - event.dataset '::opcua_binary_browse': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -934,17 +935,17 @@ soc: - opcua.link_id - opcua.service_type - log.id.uid - - event.dataset '::opcua_binary_browse_description': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - log.id.uid - - event.dataset '::opcua_binary_browse_response_references': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -952,27 +953,27 @@ soc: - opcua.node_class - opcua.display_name_text - log.id.uid - - event.dataset '::opcua_binary_browse_result': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.response_link_id - log.id.uid - - event.dataset '::opcua_binary_create_session': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.link_id - log.id.uid - - 
event.dataset '::opcua_binary_create_session_endpoints': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -980,27 +981,27 @@ soc: - opcua.endpoint_link_id - opcua.endpoint_url - log.id.uid - - event.dataset '::opcua_binary_create_session_user_token': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.user_token_link_id - log.id.uid - - event.dataset '::opcua_binary_create_subscription': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.link_id - log.id.uid - - event.dataset '::opcua_binary_get_endpoints': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1008,9 +1009,9 @@ soc: - opcua.endpoint_url - opcua.link_id - log.id.uid - - event.dataset '::opcua_binary_get_endpoints_description': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1018,9 +1019,9 @@ soc: - opcua.endpoint_description_link_id - opcua.endpoint_uri - log.id.uid - - event.dataset '::opcua_binary_get_endpoints_user_token': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1028,9 +1029,9 @@ soc: - opcua.user_token_link_id - opcua.user_token_type - log.id.uid - - event.dataset '::opcua_binary_read': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1038,9 +1039,9 @@ soc: - opcua.link_id - opcua.read_results_link_id - log.id.uid - - event.dataset '::opcua_binary_status_code_detail': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1048,9 +1049,9 @@ soc: - opcua.info_type_string - opcua.source_string - log.id.uid - - event.dataset '::profinet': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1058,18 +1059,18 @@ soc: - profinet.index - profinet.operation_type - log.id.uid - - event.dataset '::profinet_dce_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - 
destination.ip - destination.port - profinet.operation - log.id.uid - - event.dataset '::s7comm': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1077,9 +1078,9 @@ soc: - s7.ros.control.name - s7.function.name - log.id.uid - - event.dataset '::s7comm_plus': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1087,9 +1088,9 @@ soc: - s7.opcode.name - s7.version - log.id.uid - - event.dataset '::s7comm_read_szl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1097,9 +1098,9 @@ soc: - s7.szl_id_name - s7.return_code_name - log.id.uid - - event.dataset '::s7comm_upload_download': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1107,52 +1108,52 @@ soc: - s7.ros.control.name - s7.function_code - log.id.uid - - event.dataset '::tds': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.command - log.id.uid - - event.dataset '::tds_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.procedure_name - log.id.uid - - event.dataset '::tds_sql_batch': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.header_type - log.id.uid - - event.dataset ':endpoint:events_x_api': - soc_timestamp + - event.dataset - host.name - user.name - process.name - process.Ext.api.name - process.thread.Ext.call_stack_final_user_module.path - - event.dataset ':endpoint:events_x_file': - soc_timestamp + - event.dataset - host.name - user.name - process.name - event.action - file.path - - event.dataset ':endpoint:events_x_library': - soc_timestamp + - event.dataset - host.name - user.name - process.name @@ -1160,9 +1161,9 @@ soc: - dll.path - dll.code_signature.status - dll.code_signature.subject_name - - event.dataset ':endpoint:events_x_network': - soc_timestamp + - event.dataset - host.name - user.name - process.name @@ 
-1172,43 +1173,43 @@ soc: - destination.ip - destination.port - network.community_id - - event.dataset ':endpoint:events_x_process': - soc_timestamp + - event.dataset - host.name - user.name - process.parent.name - process.name - event.action - process.working_directory - - event.dataset ':endpoint:events_x_registry': - soc_timestamp + - event.dataset - host.name - user.name - process.name - event.action - registry.path - - event.dataset ':endpoint:events_x_security': - soc_timestamp + - event.dataset - host.name - user.name - process.executable - event.action - event.outcome - - event.dataset ':system:': - soc_timestamp + - event.dataset - process.name - process.pid - user.effective.name - user.name - system.auth.sudo.command - - event.dataset - message ':opencanary:': - soc_timestamp + - event.dataset - source.ip - source.port - logdata.HOSTNAME @@ -1216,20 +1217,20 @@ soc: - logdata.PATH - logdata.USERNAME - logdata.USERAGENT - - event.dataset ':elastic_agent:': - soc_timestamp - event.dataset - message ':kismet:': - soc_timestamp + - event.dataset - device.manufacturer - client.mac - network.wireless.ssid - network.wireless.bssid - - event.dataset ':playbook:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1241,6 +1242,7 @@ soc: - event_data.process.pid ':sigma:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1954,6 +1956,7 @@ soc: eventFields: default: - soc_timestamp + - event.dataset - rule.name - event.severity_label - source.ip @@ -1966,6 +1969,7 @@ soc: - rule.rev ':playbook:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1977,6 +1981,7 @@ soc: - event_data.process.pid ':sigma:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1989,13 +1994,13 @@ soc: - event_data.process.pid ':strelka:': - soc_timestamp + - event.dataset - file.name - 
file.size - hash.md5 - file.source - file.mime_type - log.id.fuid - - event.dataset queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged From 5b966b83a9881a35c1a040561255af3b000739d1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 6 May 2024 09:26:52 -0400 Subject: [PATCH 508/777] change rulesRepos for airgap or not --- salt/soc/defaults.yaml | 24 +++++++++++++++++------- salt/soc/merged.map.jinja | 9 +++++++++ salt/soc/soc_soc.yaml | 24 ++++++++++++++---------- 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ad154e9d1..1f96c63a8 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1274,10 +1274,15 @@ soc: rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state rulesRepos: - - repo: https://github.com/Security-Onion-Solutions/securityonion-resources - license: Elastic-2.0 - folder: sigma/stable - community: true + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-resources + license: Elastic-2.0 + folder: sigma/stable + community: true + airgap: + - repo: file:///nsm/rules/detect-sigma/repos/securityonion-resources + license: DRL + community: true sigmaRulePackages: - core - emerging_threats_addon @@ -1333,9 +1338,14 @@ soc: denyRegex: '' reposFolder: /opt/sensoroni/yara/repos rulesRepos: - - repo: https://github.com/Security-Onion-Solutions/securityonion-yara - license: DRL - community: true + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-yara + license: DRL + community: true + airgap: + - repo: file:///nsm/rules/detect-yara/repos/securityonion-yara + license: DRL + community: true yaraRulesFolder: /opt/sensoroni/yara/rules stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state suricataengine: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 222566dba..e31fabf2a 100644 --- 
a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -37,6 +37,15 @@ {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules.default}) %} {% endif %} +{# set elastalertengine.rulesRepos and strelkaengine.rulesRepos based on airgap or not #} +{% if GLOBALS.airgap %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %} +{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %} +{% else %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %} +{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %} +{% endif %} + {# remove these modules if detections is disabled #} {% if not SOCMERGED.config.server.client.detectionsEnabled %} {% do SOCMERGED.config.server.modules.pop('elastalertengine') %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index a9d6bac08..01308f73f 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -107,11 +107,13 @@ soc: advanced: True helpLink: sigma.html rulesRepos: - description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' - global: True - advanced: True - forcedType: "[]{}" - helpLink: sigma.html + default: &eerulesRepos + description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. 
"community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' + global: True + advanced: True + forcedType: "[]{}" + helpLink: sigma.html + airgap: *eerulesRepos sigmaRulePackages: description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True @@ -205,11 +207,13 @@ soc: advanced: True helpLink: yara.html rulesRepos: - description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' - global: True - advanced: True - forcedType: "[]{}" - helpLink: yara.html + default: &serulesRepos + description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' + global: True + advanced: True + forcedType: "[]{}" + helpLink: yara.html + airgap: *serulesRepos suricataengine: allowRegex: description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' 
From 38f74d2e9e8e17262a44d21fb515c9cc7ab73053 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 6 May 2024 11:38:30 -0400 Subject: [PATCH 509/777] change quotes --- salt/soc/soc_soc.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 01308f73f..67305d4e9 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -108,7 +108,7 @@ soc: helpLink: sigma.html rulesRepos: default: &eerulesRepos - description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' + description: "Custom Git repos to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." global: True advanced: True forcedType: "[]{}" @@ -208,7 +208,7 @@ soc: helpLink: yara.html rulesRepos: default: &serulesRepos - description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' + description: "Custom Git repos to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." 
global: True advanced: True forcedType: "[]{}" From be1758aea71f308a2aa2fd4204da80a9015b2a8e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 6 May 2024 12:22:44 -0400 Subject: [PATCH 510/777] Fix license and folder --- salt/soc/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 1f96c63a8..5ae1497f0 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1281,7 +1281,8 @@ soc: community: true airgap: - repo: file:///nsm/rules/detect-sigma/repos/securityonion-resources - license: DRL + license: Elastic-2.0 + folder: sigma/stable community: true sigmaRulePackages: - core From 554a2035414f0ddea0e01b4f8acaac55233251d9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 6 May 2024 12:59:45 -0400 Subject: [PATCH 511/777] update airgapEnabled in map file --- salt/soc/defaults.yaml | 1 - salt/soc/merged.map.jinja | 2 ++ salt/soc/soc_soc.yaml | 5 ----- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 1f96c63a8..582f0af82 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1246,7 +1246,6 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - airgapEnabled: false modules: cases: soc filedatastore: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index e31fabf2a..f23d9c115 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -41,9 +41,11 @@ {% if GLOBALS.airgap %} {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %} {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %} +{% do SOCMERGED.config.server.update({'airgapEnabled': true}) %} {% else %} {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': 
SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %} {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %} +{% do SOCMERGED.config.server.update({'airgapEnabled': false}) %} {% endif %} {# remove these modules if detections is disabled #} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 67305d4e9..2b1e83ec4 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -119,11 +119,6 @@ soc: global: True advanced: False helpLink: sigma.html - airgapEnabled: - description: 'This setting dynamically changes to the current status of Airgap on this system and is used during the Sigma ruleset update process.' - global: True - advanced: True - helpLink: sigma.html elastic: index: description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records. From 5aa611302a7cdf3a1f6159758710dd7ab20141f3 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:08:01 +0000 Subject: [PATCH 512/777] Handle YARA rules for distributed deployments --- salt/allowed_states.map.jinja | 3 +++ salt/strelka/config.sls | 9 +++++++++ salt/top.sls | 3 +++ 3 files changed, 15 insertions(+) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 7fbf4ff14..109e244d7 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -65,6 +65,7 @@ 'registry', 'manager', 'nginx', + 'strelka.manager', 'soc', 'kratos', 'influxdb', @@ -91,6 +92,7 @@ 'nginx', 'telegraf', 'influxdb', + 'strelka.manager', 'soc', 'kratos', 'elasticfleet', @@ -111,6 +113,7 @@ 'nginx', 'telegraf', 'influxdb', + 'strelka.manager', 'soc', 'kratos', 'elastic-fleet-package-registry', diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 90bba58a7..c65f9c2cb 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -29,6 +29,15 @@ strelkarulesdir: - group: 939 - makedirs: True +{%- if 
grains.role in ['so-sensor', 'so-heavynode'] %} +strelkasensorrules: + file.managed: + - name: /opt/so/conf/strelka/rules/compiled/rules.compiled + - source: salt://strelka/rules/compiled/rules.compiled + - user: 939 + - group: 939 +{%- endif %} + strelkareposdir: file.directory: - name: /opt/so/conf/strelka/repos diff --git a/salt/top.sls b/salt/top.sls index d4852aa4d..e4eaab786 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -87,6 +87,7 @@ base: - registry - nginx - influxdb + - strelka.manager - soc - kratos - firewall @@ -161,6 +162,7 @@ base: - registry - nginx - influxdb + - strelka.manager - soc - kratos - firewall @@ -210,6 +212,7 @@ base: - manager - nginx - influxdb + - strelka.manager - soc - kratos - sensoroni From 445fb316342089293bc45efe3c6e24e006e9413a Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:09:37 +0000 Subject: [PATCH 513/777] Add manager SLS --- salt/strelka/compile_yara.py | 67 ++++++++++++++++++++++++++++++++++++ salt/strelka/manager.sls | 45 ++++++++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 salt/strelka/compile_yara.py create mode 100644 salt/strelka/manager.sls diff --git a/salt/strelka/compile_yara.py b/salt/strelka/compile_yara.py new file mode 100644 index 000000000..dc77980d2 --- /dev/null +++ b/salt/strelka/compile_yara.py @@ -0,0 +1,67 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +import os +import yara +import glob +import json +from concurrent.futures import ThreadPoolExecutor + +def check_syntax(rule_file): + try: + # Testing if compilation throws a syntax error, don't save the result + yara.compile(filepath=rule_file) + return (True, rule_file, None) + except yara.SyntaxError as e: + # Return the error message for logging purposes + return (False, rule_file, str(e)) + +def compile_yara_rules(rules_dir): + compiled_dir = os.path.join(rules_dir, "compiled") + compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] + rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) + files_to_compile = {} + removed_count = 0 + success_count = 0 + + # Use ThreadPoolExecutor to parallelize syntax checks + with ThreadPoolExecutor() as executor: + results = executor.map(check_syntax, rule_files) + + # Collect yara files and prepare for batch compilation + for success, rule_file, error_message in results: + if success: + files_to_compile[os.path.basename(rule_file)] = rule_file + success_count += 1 + else: + # Extract just the UUID from the rule file name + rule_id = os.path.splitext(os.path.basename(rule_file))[0] + log_entry = { + "event_module": "soc", + "event_dataset": "soc.detections", + "log.level": "error", + "error_message": error_message, + "error_analysis": "Syntax Error", + "detection_type": "YARA", + "rule_uuid": rule_id, + "error_type": "runtime_status" + } + with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file: + json.dump(log_entry, log_file) + log_file.write('\n') # Ensure new entries start on new lines + os.remove(rule_file) + removed_count += 1 + + # Compile all remaining valid rules into a single file + if files_to_compile: + compiled_rules = yara.compile(filepaths=files_to_compile) + for path in compiled_rules_path: + compiled_rules.save(path) + print(f"All remaining rules compiled and saved 
into {path}") + + # Print summary of compilation results + print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") + +compile_yara_rules("/opt/sensoroni/yara/rules/") diff --git a/salt/strelka/manager.sls b/salt/strelka/manager.sls new file mode 100644 index 000000000..1c56a18fd --- /dev/null +++ b/salt/strelka/manager.sls @@ -0,0 +1,45 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls in allowed_states %} + +# Strelka config +strelkaconfdir: + file.directory: + - name: /opt/so/conf/strelka/rules/compiled/ + - user: 939 + - group: 939 + - makedirs: True + +strelkacompileyara: + file.managed: + - name: /opt/so/conf/strelka/compile_yara.py + - source: salt://strelka/compile_yara/compile_yara.py + - user: 939 + - group: 939 + - makedirs: True + +strelkarulesdir: + file.directory: + - name: /opt/so/conf/strelka/rules + - user: 939 + - group: 939 + - makedirs: True + +strelkareposdir: + file.directory: + - name: /opt/so/conf/strelka/repos + - user: 939 + - group: 939 + - makedirs: True + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} From d2fa77ae1074accd831129fc96c0a62d9a5d0cf1 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:10:41 +0000 Subject: [PATCH 514/777] Update compile script --- salt/strelka/compile_yara.py | 67 ----------------------- salt/strelka/compile_yara/compile_yara.py | 9 +-- 2 files changed, 5 insertions(+), 71 deletions(-) delete mode 100644 salt/strelka/compile_yara.py diff --git a/salt/strelka/compile_yara.py b/salt/strelka/compile_yara.py deleted file mode 100644 index 
dc77980d2..000000000 --- a/salt/strelka/compile_yara.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -import os -import yara -import glob -import json -from concurrent.futures import ThreadPoolExecutor - -def check_syntax(rule_file): - try: - # Testing if compilation throws a syntax error, don't save the result - yara.compile(filepath=rule_file) - return (True, rule_file, None) - except yara.SyntaxError as e: - # Return the error message for logging purposes - return (False, rule_file, str(e)) - -def compile_yara_rules(rules_dir): - compiled_dir = os.path.join(rules_dir, "compiled") - compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] - rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) - files_to_compile = {} - removed_count = 0 - success_count = 0 - - # Use ThreadPoolExecutor to parallelize syntax checks - with ThreadPoolExecutor() as executor: - results = executor.map(check_syntax, rule_files) - - # Collect yara files and prepare for batch compilation - for success, rule_file, error_message in results: - if success: - files_to_compile[os.path.basename(rule_file)] = rule_file - success_count += 1 - else: - # Extract just the UUID from the rule file name - rule_id = os.path.splitext(os.path.basename(rule_file))[0] - log_entry = { - "event_module": "soc", - "event_dataset": "soc.detections", - "log.level": "error", - "error_message": error_message, - "error_analysis": "Syntax Error", - "detection_type": "YARA", - "rule_uuid": rule_id, - "error_type": "runtime_status" - } - with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as 
log_file: - json.dump(log_entry, log_file) - log_file.write('\n') # Ensure new entries start on new lines - os.remove(rule_file) - removed_count += 1 - - # Compile all remaining valid rules into a single file - if files_to_compile: - compiled_rules = yara.compile(filepaths=files_to_compile) - for path in compiled_rules_path: - compiled_rules.save(path) - print(f"All remaining rules compiled and saved into {path}") - - # Print summary of compilation results - print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") - -compile_yara_rules("/opt/sensoroni/yara/rules/") diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index ece3c6a9e..dc77980d2 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -20,7 +20,7 @@ def check_syntax(rule_file): def compile_yara_rules(rules_dir): compiled_dir = os.path.join(rules_dir, "compiled") - compiled_rules_path = os.path.join(compiled_dir, "rules.compiled") + compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) files_to_compile = {} removed_count = 0 @@ -57,10 +57,11 @@ def compile_yara_rules(rules_dir): # Compile all remaining valid rules into a single file if files_to_compile: compiled_rules = yara.compile(filepaths=files_to_compile) - compiled_rules.save(compiled_rules_path) - print(f"All remaining rules compiled and saved into {compiled_rules_path}") + for path in compiled_rules_path: + compiled_rules.save(path) + print(f"All remaining rules compiled and saved into {path}") # Print summary of compilation results print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") -compile_yara_rules("/opt/sensoroni/yara/rules/") \ No newline at end of file 
+compile_yara_rules("/opt/sensoroni/yara/rules/") From 5056ec526bb4c032b82df102e5955a445d8e6cee Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:27:38 +0000 Subject: [PATCH 515/777] Add compiled directory --- salt/strelka/rules/compiled/DO.NOT.TOUCH | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 salt/strelka/rules/compiled/DO.NOT.TOUCH diff --git a/salt/strelka/rules/compiled/DO.NOT.TOUCH b/salt/strelka/rules/compiled/DO.NOT.TOUCH new file mode 100644 index 000000000..e69de29bb From 1e48955376543806d610443f20ae1897d6e776df Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:39:03 +0000 Subject: [PATCH 516/777] Restart when rules change --- salt/strelka/backend/enabled.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index a626924b1..ffb1df257 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - #- watch: - #- file: strelkarules + - watch: + - file: /opt/so/conf/strelka/rules/compiled/rules.compiled delete_so-strelka-backend_so-status.disabled: file.uncomment: From 4ebe070cd8b9e916087b0177911f8699107a7b7b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 6 May 2024 19:03:12 -0400 Subject: [PATCH 517/777] test regexes for detections --- salt/soc/soc_soc.yaml | 7 +++++++ setup/so-functions | 7 +++++++ setup/so-setup | 3 +++ 3 files changed, 17 insertions(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 4b88a5f84..c3bb525a3 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -135,6 +135,7 @@ soc: description: Duration (in milliseconds) to wait for a response from the Elasticsearch host before giving up and showing an error on the SOC UI. 
global: True advanced: True + forcedType: int casesEnabled: description: Set to true if the SOC case management module, natively integrated with Elasticsearch, should be enabled. global: True @@ -179,10 +180,12 @@ soc: description: Duration (in milliseconds) to wait for a response from the Salt API when executing tasks known for being long running before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int relayTimeoutMs: description: Duration (in milliseconds) to wait for a response from the Salt API when executing common grid management tasks before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int strelkaengine: allowRegex: description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' @@ -242,17 +245,21 @@ soc: description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int webSocketTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server websocket before giving up and reconnecting. global: True advanced: True + forcedType: int tipTimeoutMs: description: Duration (in milliseconds) to show the popup tips, which typically indicate a successful operation. global: True + forcedType: int cacheExpirationMs: description: Duration (in milliseconds) of cached data within the browser, including users and settings. global: True advanced: True + forcedType: int casesEnabled: description: Set to true to enable case management in SOC. 
global: True diff --git a/setup/so-functions b/setup/so-functions index 7afc0a883..80ad0be6a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1257,6 +1257,13 @@ soc_pillar() { " config:"\ " server:"\ " srvKey: '$SOCSRVKEY'"\ + " modules:"\ + " elastalertengine:"\ + " allowRegex: '$ELASTALERT_ALLOW_REGEX'"\ + " strelkaengine:"\ + " allowRegex: '$STRELKA_ALLOW_REGEX'"\ + " suricataengine:"\ + " allowRegex: '$SURICATA_ALLOW_REGEX'"\ "" > "$soc_pillar_file" if [[ $telemetry -ne 0 ]]; then diff --git a/setup/so-setup b/setup/so-setup index 8a1879c58..9ce99d2d2 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -245,6 +245,9 @@ if [ -n "$test_profile" ]; then WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r WEBPASSWD2=0n10nus3r + STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*" + ELASTALERT_ALLOW_REGEX="Security Onion" + SURICATA_ALLOW_REGEX="200033\\d" update_sudoers_for_testing fi From bee8c2c1ce15f7033a1dfcfd9127df73ecbdf87b Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 May 2024 13:21:59 +0000 Subject: [PATCH 518/777] Remove watch --- salt/strelka/backend/enabled.sls | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index ffb1df257..1de22f404 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,6 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - - watch: - - file: /opt/so/conf/strelka/rules/compiled/rules.compiled delete_so-strelka-backend_so-status.disabled: file.uncomment: From dcc1f656ee68cea737675c6fdb7a524c28bb5cf5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 7 May 2024 10:13:51 -0400 Subject: [PATCH 519/777] predownload logstash and elastic for new searchnode and heavynode --- salt/elasticsearch/addsearchnode.sls | 29 ++++++++++++++++++++++++++++ salt/elasticsearch/download.sls | 20 +++++++++++++++++++ salt/logstash/download.sls | 20 +++++++++++++++++++ 
salt/manager/tools/sbin/so-minion | 4 ++++ salt/orch/container_download.sls | 10 ++++++++++ salt/top.sls | 6 +++--- 6 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 salt/elasticsearch/addsearchnode.sls create mode 100644 salt/elasticsearch/download.sls create mode 100644 salt/logstash/download.sls create mode 100644 salt/orch/container_download.sls diff --git a/salt/elasticsearch/addsearchnode.sls b/salt/elasticsearch/addsearchnode.sls new file mode 100644 index 000000000..c5b40df4a --- /dev/null +++ b/salt/elasticsearch/addsearchnode.sls @@ -0,0 +1,29 @@ +so-soc container extrahosts +seed_hosts elasticsearch.yaml +so-elasticsearch container extrahosts +so-logstash container extrahosts + + ID: elasticfleet_sbin_jinja + Function: file.recurse + Name: /usr/sbin + Result: True + Comment: Recursively updated /usr/sbin + Started: 19:56:53.468894 + Duration: 951.706 ms + Changes: + ---------- + /usr/sbin/so-elastic-fleet-artifacts-url-update: + ---------- + diff: + --- + +++ + @@ -26,7 +26,7 @@ + } + + # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) + -LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}, "jpp70sea2": {"ip": "10.66.166.142"}}}' + +LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}}}' + + # Initialize an array for new hosts from Fleet Nodes + declare -a NEW_LIST=() + diff --git a/salt/elasticsearch/download.sls b/salt/elasticsearch/download.sls new file mode 100644 index 000000000..f74c7059a --- /dev/null +++ b/salt/elasticsearch/download.sls @@ -0,0 +1,20 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +so-elasticsearch_image: + docker_image.present: + - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }} + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} diff --git a/salt/logstash/download.sls b/salt/logstash/download.sls new file mode 100644 index 000000000..cf1c6176c --- /dev/null +++ b/salt/logstash/download.sls @@ -0,0 +1,20 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +so-logstash_image: + docker_image.present: + - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }} + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 79eea59fe..e0e892c3d 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -604,6 +604,10 @@ function updateMineAndApplyStates() { #checkMine "network.ip_addrs" # calls so-common and set_minionid sets MINIONID to local minion id set_minionid + # if this is a searchnode or heavynode, start downloading logstash and elasticsearch containers while the manager prepares for the new node + if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then + salt-run state.orch orch.container_download pillar="{'setup': {'newnode': $MINION_ID }}" > /dev/null 2>&1 & + fi # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 & } diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls new file mode 100644 index 000000000..c4aedaaba --- /dev/null +++ b/salt/orch/container_download.sls @@ -0,0 +1,10 @@ +{% set NEWNODE = salt['pillar.get']('setup:newnode') %} + +{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} +{{NEWNODE}}_download_logstash_elasticsearch: + salt.state: + - tgt: {{ NEWNODE }} + - sls: + - logstash.download + - elasticsearch.download +{% endif %} diff --git a/salt/top.sls b/salt/top.sls index d4852aa4d..2510356c4 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -143,13 +143,13 @@ base: 
'*_searchnode and G@saltversion:{{saltversion}}': - match: compound + - firewall - ssl + - elasticsearch + - logstash - sensoroni - telegraf - nginx - - firewall - - elasticsearch - - logstash - elasticfleet.install_agent_grid - stig From 2e70d157e27b7b2b8f0d5dadfadeb351f78cb43e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 7 May 2024 11:13:51 -0400 Subject: [PATCH 520/777] Add ref --- salt/elasticfleet/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 2af7e7532..2d9ab97a1 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -37,6 +37,7 @@ elasticfleet: - azure - barracuda - carbonblack_edr + - cef - checkpoint - cisco_asa - cisco_duo @@ -122,4 +123,4 @@ elasticfleet: base_url: http://localhost:2501 poll_interval: 1m api_key: - enabled_nodes: [] \ No newline at end of file + enabled_nodes: [] From 1da88b70ac3d7390630d7363b2a09213f5443e04 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Mon, 6 May 2024 09:56:24 -0600 Subject: [PATCH 521/777] Specify Error Retry Wait and Error Limit for All Detection Engines If a sync errors out, the engine will wait `communityRulesImportErrorSeconds` seconds instead of the usual `communityRulesImportFrequencySeconds` seconds wait. If `failAfterConsecutiveErrorCount` errors happen in a row when syncing detections to ElasticSearch then the sync is considered a failure and will give up and try again later. This assumes ElasticSearch is the source of the errors and backs of in hopes it'll be able to fix itself. 
--- salt/soc/defaults.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index d76a0a0e4..04a66dc94 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1279,6 +1279,8 @@ soc: - securityonion-resources+critical - securityonion-resources+high communityRulesImportFrequencySeconds: 28800 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert reposFolder: /opt/sensoroni/sigma/repos @@ -1346,6 +1348,8 @@ soc: - securityonion-yara autoUpdateEnabled: true communityRulesImportFrequencySeconds: 28800 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py denyRegex: '' reposFolder: /opt/sensoroni/yara/repos @@ -1364,6 +1368,8 @@ soc: allowRegex: '' autoUpdateEnabled: true communityRulesImportFrequencySeconds: 28800 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint From 8364b2a7308931d3422792bb069e78be36db4765 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 7 May 2024 14:30:52 -0400 Subject: [PATCH 522/777] update for testing --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 9ce99d2d2..b76f9bb98 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -247,7 +247,7 @@ if [ -n "$test_profile" ]; then WEBPASSWD2=0n10nus3r STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*" ELASTALERT_ALLOW_REGEX="Security Onion" - SURICATA_ALLOW_REGEX="200033\\d" + SURICATA_ALLOW_REGEX="(200033\\d|2100538|2102466)" update_sudoers_for_testing fi From 2eee61778842c136466451ea3da7ea696764b109 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 7 May 2024 17:21:01 -0400 Subject: [PATCH 523/777] Update 
soc_idstools.yaml --- salt/idstools/soc_idstools.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index ce8b56569..d1cca0028 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -16,6 +16,8 @@ idstools: urls: description: This is a list of additional rule download locations. global: True + multiline: True + forcedType: "[]string" helpLink: rules.html sids: disabled: From 326c59bb264cb045a88acd50560e007447769c5d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 8 May 2024 08:42:38 -0400 Subject: [PATCH 524/777] Update soc_idstools.yaml --- salt/idstools/soc_idstools.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index d1cca0028..698a7a1fc 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -14,10 +14,11 @@ idstools: regex: ETPRO\b|ETOPEN\b helpLink: rules.html urls: - description: This is a list of additional rule download locations. + description: This is a list of additional rule download locations. This feature is currently disabled. global: True multiline: True forcedType: "[]string" + readonly: True helpLink: rules.html sids: disabled: From 6d2ecce9b741316b56019524f4699638b22fc4a2 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 8 May 2024 08:43:37 -0400 Subject: [PATCH 525/777] remove old yara airgap code --- salt/manager/tools/sbin/soup | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fa3c3b5ee..285882748 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -734,12 +734,6 @@ unmount_update() { update_airgap_rules() { # Copy the rules over to update them for airgap. 
rsync -av $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/ - rsync -av $UPDATE_DIR/agrules/yara/* /nsm/rules/yara/ - if [ -d /nsm/repo/rules/sigma ]; then - rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/ - fi - - # SOC Detections Airgap rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/ rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/ } From 5dc098f0fc6e71a9e3b2f9f953fa48669a337eeb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 May 2024 08:54:24 -0400 Subject: [PATCH 526/777] remove test file --- salt/elasticsearch/addsearchnode.sls | 29 ---------------------------- 1 file changed, 29 deletions(-) delete mode 100644 salt/elasticsearch/addsearchnode.sls diff --git a/salt/elasticsearch/addsearchnode.sls b/salt/elasticsearch/addsearchnode.sls deleted file mode 100644 index c5b40df4a..000000000 --- a/salt/elasticsearch/addsearchnode.sls +++ /dev/null @@ -1,29 +0,0 @@ -so-soc container extrahosts -seed_hosts elasticsearch.yaml -so-elasticsearch container extrahosts -so-logstash container extrahosts - - ID: elasticfleet_sbin_jinja - Function: file.recurse - Name: /usr/sbin - Result: True - Comment: Recursively updated /usr/sbin - Started: 19:56:53.468894 - Duration: 951.706 ms - Changes: - ---------- - /usr/sbin/so-elastic-fleet-artifacts-url-update: - ---------- - diff: - --- - +++ - @@ -26,7 +26,7 @@ - } - - # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) - -LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}, "jpp70sea2": {"ip": "10.66.166.142"}}}' - +LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}}}' - - # Initialize an array for new hosts from Fleet Nodes - declare -a NEW_LIST=() - From 0d2e5e0065435837c0572c1be57d22dbfa9771f0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 May 2024 09:50:01 -0400 Subject: [PATCH 
527/777] need repo and docker first --- salt/orch/container_download.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls index c4aedaaba..90fb4f6aa 100644 --- a/salt/orch/container_download.sls +++ b/salt/orch/container_download.sls @@ -5,6 +5,8 @@ salt.state: - tgt: {{ NEWNODE }} - sls: + - repo.client + - docker - logstash.download - elasticsearch.download {% endif %} From 1862deaf5e6706193586770f157b34b9cdbf519b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 May 2024 10:14:08 -0400 Subject: [PATCH 528/777] add copyright --- salt/orch/container_download.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls index 90fb4f6aa..aa8e19587 100644 --- a/salt/orch/container_download.sls +++ b/salt/orch/container_download.sls @@ -1,3 +1,8 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ {% set NEWNODE = salt['pillar.get']('setup:newnode') %} {% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} From 5a5a1e86acf1a4aee29a6fd01ccc9c1651e2474c Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 8 May 2024 13:26:36 -0400 Subject: [PATCH 529/777] FIX: Adjust so-import-pcap so that suricata works when it is pcapengine #12969 --- salt/common/tools/sbin_jinja/so-import-pcap | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin_jinja/so-import-pcap b/salt/common/tools/sbin_jinja/so-import-pcap index b8a90421f..30d5d4fc4 100755 --- a/salt/common/tools/sbin_jinja/so-import-pcap +++ b/salt/common/tools/sbin_jinja/so-import-pcap @@ -89,6 +89,7 @@ function suricata() { -v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${NSM_PATH}/:/nsm/:rw \ -v "$PCAP:/input.pcap:ro" \ + -v /dev/null:/nsm/suripcap:rw \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 From 5b7b6e5fb876d7c333603007e5e602e4c8a163ec Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 8 May 2024 14:00:23 -0400 Subject: [PATCH 530/777] FEATURE: Add more fields to the SOC Dashboards URL for so-import-pcap #12972 --- salt/common/tools/sbin_jinja/so-import-pcap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin_jinja/so-import-pcap b/salt/common/tools/sbin_jinja/so-import-pcap index 30d5d4fc4..d3886305e 100755 --- a/salt/common/tools/sbin_jinja/so-import-pcap +++ b/salt/common/tools/sbin_jinja/so-import-pcap @@ -248,7 +248,7 @@ fi START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g') END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g') if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then - URL="https://{{ URLBASE 
}}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" + URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" status "Import complete!" 
status From 77e21170515b2f7e433ad7954414d9d579f64166 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 8 May 2024 18:47:52 +0000 Subject: [PATCH 531/777] Account for 0 active rules and change watch --- salt/strelka/backend/enabled.sls | 2 ++ salt/strelka/config.sls | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 1de22f404..863115eda 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,6 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure + - watch: + - file: /opt/so/conf/strelka/rules/compiled/* delete_so-strelka-backend_so-status.disabled: file.uncomment: diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index c65f9c2cb..4d3686c41 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -31,11 +31,13 @@ strelkarulesdir: {%- if grains.role in ['so-sensor', 'so-heavynode'] %} strelkasensorrules: - file.managed: - - name: /opt/so/conf/strelka/rules/compiled/rules.compiled - - source: salt://strelka/rules/compiled/rules.compiled + file.recurse: + - name: /opt/so/conf/strelka/rules/compiled/ + - source: salt://strelka/rules/compiled/ - user: 939 - group: 939 + - file_mode: 755 + - clean: True {%- endif %} strelkareposdir: From ad9fdf064b579ec2399da5755d20209461feff2a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 8 May 2024 15:24:29 -0400 Subject: [PATCH 532/777] Update config.sls --- salt/soc/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/config.sls b/salt/soc/config.sls index b440b07fc..a85032295 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -82,7 +82,7 @@ socmotd: crondetectionsruntime: cron.present: - - name: /usr/local/bin/so-detections-runtime-status cron + - name: /usr/sbin/so-detections-runtime-status cron - identifier: detections-runtime-status - user: root - minute: '*/10' From 
0567b935340800e098dd088f609438e88a2a6d5d Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 8 May 2024 15:39:59 -0400 Subject: [PATCH 533/777] Remove mode --- salt/strelka/config.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 4d3686c41..cd8fb2667 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -36,7 +36,6 @@ strelkasensorrules: - source: salt://strelka/rules/compiled/ - user: 939 - group: 939 - - file_mode: 755 - clean: True {%- endif %} From dff609d829a3f0311e1d56d345185b87553b7ce9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 8 May 2024 16:13:09 -0400 Subject: [PATCH 534/777] Add basic read-only metric collection from Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/docker/defaults.yaml | 1 + salt/kafka/enabled.sls | 9 +++++---- salt/telegraf/etc/telegraf.conf | 17 +++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 82ad3b6ea..161dde485 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -192,6 +192,7 @@ docker: port_bindings: - 0.0.0.0:9092:9092 - 0.0.0.0:9093:9093 + - 0.0.0.0:8778:8778 custom_bind_mounts: [] extra_hosts: [] extra_env: [] diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 9275eca91..b01e6f2a8 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -25,7 +25,8 @@ so-kafka: - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - user: kafka - environment: - - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G + KAFKA_HEAP_OPTS: -Xmx2G -Xms1G + KAFKA_OPTS: -javaagent:/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/jolokia/jolokia.xml - extra_hosts: {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} @@ -40,10 +41,10 @@ so-kafka: - {{ BINDING }} {% endfor %} - binds: - - 
/etc/pki/kafka.p12:/etc/pki/kafka.p12 - - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts:ro - /nsm/kafka/data/:/nsm/kafka/data/:rw - - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties:ro {% if GLOBALS.is_manager %} - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties {% endif %} diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 1c5801645..42a8d43bf 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -243,6 +243,23 @@ password = "{{ salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass') }}" {%- endif %} +{% if grains.role in ['so-manager','so-managersearch','so-standalone','so-receiver'] -%} +[[inputs.jolokia2_agent]] + name_prefix= "kafka_" + urls = ["http://localhost:8778/jolokia"] + +[[inputs.jolokia2_agent.metric]] + name = "topics" + mbean = "kafka.server:name=*,type=BrokerTopicMetrics" + field_prefix = "$1." + +[[inputs.jolokia2_agent.metric]] + name = "topic" + mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics" + field_prefix = "$1." + tag_keys = ["topic"] + +{%- endif %} # # Read metrics from one or more commands that can output to stdout {%- if 'sostatus.sh' in TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]] %} {%- do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('sostatus.sh') %} From eca2a4a9c8227d5065955b03ed299eaa3b7cfdbc Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 8 May 2024 16:17:09 -0400 Subject: [PATCH 535/777] Logstash consumer threads should match topic partition count - Default is set to 3. 
If there are too many consumer threads it may lead to idle logstash worker threads and could require decreasing this value to saturate workers Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 087ed7755..3d0d03020 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -13,6 +13,7 @@ input { codec => json topics => ['default-topic', 'kratos-topic', 'soc-topic', 'strelka-topic', 'suricata-topic', 'zeek-topic', 'rita-topic', 'opencanary-topic', 'syslog-topic'] group_id => 'searchnodes' + consumer_threads => 3 client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' bootstrap_servers => '{{ bootstrap_servers }}' From 01a68568a66e8549a170d5c86ecef60b0d47a2cd Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 8 May 2024 16:37:13 -0400 Subject: [PATCH 536/777] Use state --- salt/strelka/backend/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 863115eda..5510ffb0f 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -43,7 +43,7 @@ strelka_backend: {% endif %} - restart_policy: on-failure - watch: - - file: /opt/so/conf/strelka/rules/compiled/* + - file: strelkasensorrules delete_so-strelka-backend_so-status.disabled: file.uncomment: From 3a99624eb8a06f09c3f07c1f5449762bbe910f4e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 9 May 2024 10:03:02 -0400 Subject: [PATCH 537/777] seperate manager states for strelka --- salt/strelka/backend/enabled.sls | 2 +- salt/strelka/config.sls | 38 ++++++-------------------------- 2 files changed, 8 insertions(+), 32 deletions(-) diff 
--git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 5510ffb0f..a26905e1f 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -43,7 +43,7 @@ strelka_backend: {% endif %} - restart_policy: on-failure - watch: - - file: strelkasensorrules + - file: strelkasensorcompiledrules delete_so-strelka-backend_so-status.disabled: file.uncomment: diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index cd8fb2667..e8f211bc8 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -5,45 +5,21 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% if GLOBALS.is_manager %} +include: + - strelka.manager +{% endif %} # Strelka config -strelkaconfdir: - file.directory: - - name: /opt/so/conf/strelka/rules/compiled/ - - user: 939 - - group: 939 - - makedirs: True - -strelkacompileyara: - file.managed: - - name: /opt/so/conf/strelka/compile_yara.py - - source: salt://strelka/compile_yara/compile_yara.py - - user: 939 - - group: 939 - - makedirs: True - -strelkarulesdir: - file.directory: - - name: /opt/so/conf/strelka/rules - - user: 939 - - group: 939 - - makedirs: True - -{%- if grains.role in ['so-sensor', 'so-heavynode'] %} -strelkasensorrules: +strelkasensorcompiledrules: file.recurse: - name: /opt/so/conf/strelka/rules/compiled/ - source: salt://strelka/rules/compiled/ - user: 939 - group: 939 - clean: True -{%- endif %} - -strelkareposdir: - file.directory: - - name: /opt/so/conf/strelka/repos - - user: 939 - - group: 939 - makedirs: True strelkadatadir: From a74fee4cd08df30a2d43b45705d6fedf7a970679 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 9 May 2024 11:26:02 -0400 Subject: [PATCH 538/777] strelka compiled rules --- salt/strelka/compile_yara/compile_yara.py | 11 +++++---- salt/strelka/config.sls | 27 ++++++++++------------- salt/strelka/manager.sls | 13 ++--------- 3 
files changed, 21 insertions(+), 30 deletions(-) diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index dc77980d2..b6fa95899 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -20,7 +20,7 @@ def check_syntax(rule_file): def compile_yara_rules(rules_dir): compiled_dir = os.path.join(rules_dir, "compiled") - compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] + compiled_rules_path = "/opt/so/saltstack/local/salt/strelka/rules/compiled/rules.compiled" rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) files_to_compile = {} removed_count = 0 @@ -57,9 +57,12 @@ def compile_yara_rules(rules_dir): # Compile all remaining valid rules into a single file if files_to_compile: compiled_rules = yara.compile(filepaths=files_to_compile) - for path in compiled_rules_path: - compiled_rules.save(path) - print(f"All remaining rules compiled and saved into {path}") + compiled_rules.save(compiled_rules_path) + print(f"All remaining rules compiled and saved into {compiled_rules_path}") + # Remove the rules.compiled if there aren't any files to be compiled + else: + if os.path.exists(compiled_rules_path): + os.remove(compiled_rules_path) # Print summary of compilation results print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index e8f211bc8..f03afa61b 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -34,7 +34,18 @@ strelkalogdir: - name: /nsm/strelka/log - user: 939 - group: 939 - - makedirs: True + +strelkagkredisdatadir: + file.directory: + - name: /nsm/strelka/gk-redis-data + - user: 939 + - group: 939 + +strelkacoordredisdatadir: + file.directory: + - name: /nsm/strelka/coord-redis-data + - user: 939 + - group: 939 
strelka_sbin: file.recurse: @@ -44,20 +55,6 @@ strelka_sbin: - group: 939 - file_mode: 755 -strelkagkredisdatadir: - file.directory: - - name: /nsm/strelka/gk-redis-data - - user: 939 - - group: 939 - - makedirs: True - -strelkacoordredisdatadir: - file.directory: - - name: /nsm/strelka/coord-redis-data - - user: 939 - - group: 939 - - makedirs: True - {% else %} {{sls}}_state_not_allowed: diff --git a/salt/strelka/manager.sls b/salt/strelka/manager.sls index 1c56a18fd..108a12deb 100644 --- a/salt/strelka/manager.sls +++ b/salt/strelka/manager.sls @@ -7,9 +7,9 @@ {% if sls in allowed_states %} # Strelka config -strelkaconfdir: +strelkarulesdir: file.directory: - - name: /opt/so/conf/strelka/rules/compiled/ + - name: /opt/so/conf/strelka/rules - user: 939 - group: 939 - makedirs: True @@ -20,21 +20,12 @@ strelkacompileyara: - source: salt://strelka/compile_yara/compile_yara.py - user: 939 - group: 939 - - makedirs: True - -strelkarulesdir: - file.directory: - - name: /opt/so/conf/strelka/rules - - user: 939 - - group: 939 - - makedirs: True strelkareposdir: file.directory: - name: /opt/so/conf/strelka/repos - user: 939 - group: 939 - - makedirs: True {% else %} From c864fec70cc1d06e5558d7de9cd12ff32675f530 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 9 May 2024 11:53:50 -0400 Subject: [PATCH 539/777] allow strelka.manager to run on standalone --- salt/strelka/manager.sls | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/strelka/manager.sls b/salt/strelka/manager.sls index 108a12deb..6a4aea416 100644 --- a/salt/strelka/manager.sls +++ b/salt/strelka/manager.sls @@ -4,7 +4,8 @@ # Elastic License 2.0. 
{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls in allowed_states %} +{# if strelka.manager or strelka in allowed_states #} +{% if sls in allowed_states or sls.split('.')[0] in allowed_states %} # Strelka config strelkarulesdir: From 823ff7ce11009ee9e85b7fbaef81a617395f1914 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 9 May 2024 17:03:13 +0000 Subject: [PATCH 540/777] Remove exclusions and repos --- salt/strelka/defaults.yaml | 22 ---------------------- salt/strelka/rules/repos.txt.jinja | 2 -- salt/strelka/soc_strelka.yaml | 12 ------------ 3 files changed, 36 deletions(-) delete mode 100644 salt/strelka/rules/repos.txt.jinja diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index f91ad8691..4d69bf53b 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -733,28 +733,6 @@ strelka: enabled: False rules: enabled: True - repos: - - https://github.com/Security-Onion-Solutions/securityonion-yara.git - excluded: - - apt_flame2_orchestrator.yar - - apt_tetris.yar - - gen_susp_js_obfuscatorio.yar - - gen_webshells.yar - - generic_anomalies.yar - - general_cloaking.yar - - thor_inverse_matches.yar - - yara_mixed_ext_vars.yar - - apt_apt27_hyperbro.yar - - apt_turla_gazer.yar - - gen_google_anomaly.yar - - gen_icon_anomalies.yar - - gen_nvidia_leaked_cert.yar - - gen_sign_anomalies.yar - - gen_susp_xor.yar - - gen_webshells_ext_vars.yar - - configured_vulns_ext_vars.yar - - expl_outlook_cve_2023_23397.yar - - gen_mal_3cx_compromise_mar23.yar filecheck: historypath: '/nsm/strelka/history/' strelkapath: '/nsm/strelka/unprocessed/' diff --git a/salt/strelka/rules/repos.txt.jinja b/salt/strelka/rules/repos.txt.jinja deleted file mode 100644 index 043a02203..000000000 --- a/salt/strelka/rules/repos.txt.jinja +++ /dev/null @@ -1,2 +0,0 @@ -# DO NOT EDIT THIS FILE! 
Strelka YARA rule repos are stored here from the strelka:rules:repos pillar section -{{ STRELKAREPOS | join('\n') }} diff --git a/salt/strelka/soc_strelka.yaml b/salt/strelka/soc_strelka.yaml index e5240b9c9..947215bd5 100644 --- a/salt/strelka/soc_strelka.yaml +++ b/salt/strelka/soc_strelka.yaml @@ -578,18 +578,6 @@ strelka: global: False helpLink: strelka.html advanced: False - repos: - description: List of repos for so-yara-download to use to download rules. - readonly: False - global: False - helpLink: strelka.html - advanced: False - excluded: - description: List of rules to exclude so-yara-update from download and propagating to backend nodes. - readonly: False - global: False - helpLink: strelka.html - advanced: False filecheck: historypath: description: The path for previously scanned files. From 8a34f5621cfa7b881b6bb47ce8873d1a32c23d70 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 9 May 2024 17:26:45 +0000 Subject: [PATCH 541/777] Remove old YARA download script --- .../manager/tools/sbin_jinja/so-yara-download | 51 ------------------- 1 file changed, 51 deletions(-) delete mode 100644 salt/manager/tools/sbin_jinja/so-yara-download diff --git a/salt/manager/tools/sbin_jinja/so-yara-download b/salt/manager/tools/sbin_jinja/so-yara-download deleted file mode 100644 index aa9576253..000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-download +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -NOROOT=1 -. 
/usr/sbin/so-common - -{%- set proxy = salt['pillar.get']('manager:proxy') %} -{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %} - -# Download the rules from the internet -{%- if proxy %} -export http_proxy={{ proxy }} -export https_proxy={{ proxy }} -export no_proxy="{{ noproxy }}" -{%- endif %} - -repos="/opt/so/conf/strelka/repos.txt" -output_dir=/nsm/rules/yara -gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com) -clone_dir="/tmp" -if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then - - while IFS= read -r repo; do - if ! $(echo "$repo" | grep -qE '^#'); then - # Remove old repo if existing bc of previous error condition or unexpected disruption - repo_name=`echo $repo | awk -F '/' '{print $NF}'` - [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name - - # Clone repo and make appropriate directories for rules - git clone $repo $clone_dir/$repo_name - echo "Analyzing rules from $clone_dir/$repo_name..." - mkdir -p $output_dir/$repo_name - # Ensure a copy of the license is available for the rules - [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name - - # Copy over rules - for i in $(find $clone_dir/$repo_name -name "*.yar*"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - cp $i $output_dir/$repo_name - done - rm -rf $clone_dir/$repo_name - fi - done < $repos - - echo "Done!" - -/usr/sbin/so-yara-update - -else - echo "Server returned $gh_status status code." - echo "No connectivity to Github...exiting..." 
- exit 1 -fi From ea4cf4291310ca489f3598dccd0a0750eb584050 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 9 May 2024 17:26:54 +0000 Subject: [PATCH 542/777] Remove old YARA update script --- salt/manager/tools/sbin_jinja/so-yara-update | 41 -------------------- 1 file changed, 41 deletions(-) delete mode 100644 salt/manager/tools/sbin_jinja/so-yara-update diff --git a/salt/manager/tools/sbin_jinja/so-yara-update b/salt/manager/tools/sbin_jinja/so-yara-update deleted file mode 100644 index 07c940f47..000000000 --- a/salt/manager/tools/sbin_jinja/so-yara-update +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -NOROOT=1 -. /usr/sbin/so-common - -echo "Starting to check for yara rule updates at $(date)..." - -newcounter=0 -excludedcounter=0 -excluded_rules=({{ EXCLUDEDRULES | join(' ') }}) - -# Pull down the SO Rules -SORULEDIR=/nsm/rules/yara -OUTPUTDIR=/opt/so/saltstack/local/salt/strelka/rules - -mkdir -p $OUTPUTDIR -# remove all rules prior to copy so we can clear out old rules -rm -f $OUTPUTDIR/* - -for i in $(find $SORULEDIR -name "*.yar" -o -name "*.yara"); do - rule_name=$(echo $i | awk -F '/' '{print $NF}') - if [[ ! "${excluded_rules[*]}" =~ ${rule_name} ]]; then - echo "Adding rule: $rule_name..." - cp $i $OUTPUTDIR/$rule_name - ((newcounter++)) - else - echo "Excluding rule: $rule_name..." - ((excludedcounter++)) - fi -done - -if [ "$newcounter" -gt 0 ] || [ "$excludedcounter" -gt 0 ];then - echo "$newcounter rules added." - echo "$excludedcounter rule(s) excluded." -fi - -echo "Finished rule updates at $(date)..." 
From 6ed82d7b293f74f16041a73c63afceedcf1b2701 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 9 May 2024 17:27:46 +0000 Subject: [PATCH 543/777] Remove YARA download in setup --- setup/so-setup | 6 ------ 1 file changed, 6 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index b76f9bb98..1b91318b4 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -761,16 +761,10 @@ if ! [[ -f $install_opt_file ]]; then if [[ ! $is_airgap ]]; then title "Downloading IDS Rules" logCmd "so-rule-update" - title "Downloading YARA rules" - logCmd "su socore -c '/usr/sbin/so-yara-download'" if [[ $monints || $is_import ]]; then title "Restarting Suricata to pick up the new rules" logCmd "so-suricata-restart" fi - if [[ $monints ]]; then - title "Restarting Strelka to use new rules" - logCmd "so-strelka-restart" - fi fi title "Setting up Kibana Default Space" logCmd "so-kibana-space-defaults" From 074d063feedd83786fa694ba5608f14646ef10d3 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 9 May 2024 14:52:58 -0400 Subject: [PATCH 544/777] tests will retry on any rule import failure --- setup/so-functions | 23 +++++++++++++++++++---- setup/so-setup | 3 +++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 80ad0be6a..3cdaee9ca 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1259,12 +1259,27 @@ soc_pillar() { " srvKey: '$SOCSRVKEY'"\ " modules:"\ " elastalertengine:"\ - " allowRegex: '$ELASTALERT_ALLOW_REGEX'"\ + " allowRegex: '$ELASTALERT_ALLOW_REGEX'" > "$soc_pillar_file" + if [[ -n "$ELASTALERT_FAIL_ERROR_COUNT" ]]; then + printf '%s\n'\ + " failAfterConsecutiveErrorCount: $ELASTALERT_FAIL_ERROR_COUNT" >> "$soc_pillar_file" + fi + + printf '%s\n'\ " strelkaengine:"\ - " allowRegex: '$STRELKA_ALLOW_REGEX'"\ + " allowRegex: '$STRELKA_ALLOW_REGEX'" >> "$soc_pillar_file" + if [[ -n "$STRELKA_FAIL_ERROR_COUNT" ]]; then + printf '%s\n'\ + " failAfterConsecutiveErrorCount: $STRELKA_FAIL_ERROR_COUNT" >> 
"$soc_pillar_file" + fi + + printf '%s\n'\ " suricataengine:"\ - " allowRegex: '$SURICATA_ALLOW_REGEX'"\ - "" > "$soc_pillar_file" + " allowRegex: '$SURICATA_ALLOW_REGEX'" >> "$soc_pillar_file" + if [[ -n "$SURICATA_FAIL_ERROR_COUNT" ]]; then + printf '%s\n'\ + " failAfterConsecutiveErrorCount: $SURICATA_FAIL_ERROR_COUNT" >> "$soc_pillar_file" + fi if [[ $telemetry -ne 0 ]]; then echo " telemetryEnabled: false" >> $soc_pillar_file diff --git a/setup/so-setup b/setup/so-setup index b76f9bb98..cb535469b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -246,8 +246,11 @@ if [ -n "$test_profile" ]; then WEBPASSWD1=0n10nus3r WEBPASSWD2=0n10nus3r STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*" + STRELKA_FAIL_ERROR_COUNT=1 ELASTALERT_ALLOW_REGEX="Security Onion" + ELASTALERT_FAIL_ERROR_COUNT=1 SURICATA_ALLOW_REGEX="(200033\\d|2100538|2102466)" + SURICATA_FAIL_ERROR_COUNT=1 update_sudoers_for_testing fi From 19e1aaa1a65358f6cebba8645c28d0604b79371c Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 9 May 2024 15:45:33 -0400 Subject: [PATCH 545/777] exclude detection rule errors --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index b83c98e7a..67eff6d54 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -201,6 +201,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp. 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules fi RESULT=0 From fecd674fdb878ed289308091da574792c06cc65c Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 9 May 2024 17:55:41 -0400 Subject: [PATCH 546/777] Add quick action to find related alerts for a detection --- salt/soc/defaults.yaml | 6 ++++++ salt/soc/merged.map.jinja | 1 + 2 files changed, 7 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index b6a52fd75..dcb84aea7 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -78,6 +78,12 @@ soc: target: '' links: - '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path' + - name: actionRelatedAlerts + description: actionRelatedAlertsHelp + icon: fa-bell + links: + - '/#/alerts?q=rule.uuid: {:so_detection.publicId|escape} | groupby rule.name event.module* event.severity_label' + target: '' eventFields: default: - soc_timestamp diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 222566dba..e53790dc1 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -79,6 +79,7 @@ {% do SOCMERGED.config.server.client.update({'job': {'actions': standard_actions}}) %} {% do SOCMERGED.config.server.client.alerts.update({'actions': standard_actions}) %} {% do SOCMERGED.config.server.client.cases.update({'actions': standard_actions}) %} +{% do 
SOCMERGED.config.server.client.detections.update({'actions': standard_actions}) %} {# replace the _x_ with . for soc ui to config conversion #} {% do SOCMERGED.config.eventFields.update({':endpoint:events.api': SOCMERGED.config.eventFields.pop(':endpoint:events_x_api') }) %} From a1291e43c368024bb53676a44640d1bad651ca6e Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 10 May 2024 07:58:13 -0400 Subject: [PATCH 547/777] FIX: so-index-list typo #12988 --- salt/elasticsearch/tools/sbin/so-index-list | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin/so-index-list b/salt/elasticsearch/tools/sbin/so-index-list index 1e4595b35..dbdd6fa29 100755 --- a/salt/elasticsearch/tools/sbin/so-index-list +++ b/salt/elasticsearch/tools/sbin/so-index-list @@ -7,4 +7,4 @@ -curl -K /opt/so/conf/elasticsearch/curl.config-X GET -k -L "https://localhost:9200/_cat/indices?v&s=index" +curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://localhost:9200/_cat/indices?pretty&v&s=index" From 26cb8d43e13fc05cb0d2178e8cc82ce4f5338564 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 10 May 2024 08:01:56 -0400 Subject: [PATCH 548/777] FIX: so-index-list typo #12988 --- salt/elasticsearch/tools/sbin/so-index-list | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin/so-index-list b/salt/elasticsearch/tools/sbin/so-index-list index dbdd6fa29..572e55cba 100755 --- a/salt/elasticsearch/tools/sbin/so-index-list +++ b/salt/elasticsearch/tools/sbin/so-index-list @@ -5,6 +5,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. - +. 
/usr/sbin/so-common curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://localhost:9200/_cat/indices?pretty&v&s=index" From 950c68783c9524cc49a0bc9f1477d6c851630a78 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 10 May 2024 11:46:00 -0400 Subject: [PATCH 549/777] add pkg policycoreutils-python-utils to idh node --- salt/idh/openssh/config.sls | 2 ++ salt/idh/openssh/init.sls | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/salt/idh/openssh/config.sls b/salt/idh/openssh/config.sls index d358bb5be..4bd177faa 100644 --- a/salt/idh/openssh/config.sls +++ b/salt/idh/openssh/config.sls @@ -11,6 +11,8 @@ idh_sshd_selinux: - sel_type: ssh_port_t - prereq: - file: openssh_config + - require: + - file: python_selinux_mgmt_tools {% endif %} openssh_config: diff --git a/salt/idh/openssh/init.sls b/salt/idh/openssh/init.sls index ba0a8ab04..79d082502 100644 --- a/salt/idh/openssh/init.sls +++ b/salt/idh/openssh/init.sls @@ -15,3 +15,9 @@ openssh: - enable: False - name: {{ openssh_map.service }} {% endif %} + +{% if grains.os_family == 'RedHat' %} +python_selinux_mgmt_tools: + pkg.installed: + - name: policycoreutils-python-utils +{% endif %} From 986cbb129a009fadd8044bc3dfa6706a81629ea7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 10 May 2024 12:33:56 -0400 Subject: [PATCH 550/777] pkg not file --- salt/idh/openssh/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/idh/openssh/config.sls b/salt/idh/openssh/config.sls index 4bd177faa..5e2acd8d2 100644 --- a/salt/idh/openssh/config.sls +++ b/salt/idh/openssh/config.sls @@ -12,7 +12,7 @@ idh_sshd_selinux: - prereq: - file: openssh_config - require: - - file: python_selinux_mgmt_tools + - pkg: python_selinux_mgmt_tools {% endif %} openssh_config: From 2a0e33401df668f0ec71364d428f098fb2686225 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 10 May 2024 16:54:50 -0400 Subject: [PATCH 551/777] support upgrade tests --- salt/manager/tools/sbin/soup | 13 
+++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fa3c3b5ee..c8acab0a6 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -584,9 +584,22 @@ up_to_2.4.60() { up_to_2.4.70() { playbook_migration toggle_telemetry + add_detection_test_pillars INSTALLEDVERSION=2.4.70 } +add_detection_test_pillars() { + if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then + echo "Adding detection pillar values for automated testing" + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.allowRegex SecurityOnion + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1 + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*" + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1 + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)" + so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1 + fi +} + toggle_telemetry() { if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then cat << ASSIST_EOF From 788c31014d5dac710981cf791352bebf000f4e7e Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 13 May 2024 08:30:48 -0400 Subject: [PATCH 552/777] Update README.md to reference new screenshots for 2.4.70 --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 19a560419..27502c787 100644 --- a/README.md +++ b/README.md @@ -8,19 +8,19 @@ Alerts ![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png) Dashboards 
-![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png) +![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png) Hunt -![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png) +![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png) PCAP -![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png) +![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png) Grid -![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png) +![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png) Config -![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png) +![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png) ### Release Notes From ae323cf38531ee3b29258c187dab94010abd5a12 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 13 May 2024 08:34:44 -0400 Subject: [PATCH 553/777] Update README.md to include new Detections screenshot --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 27502c787..a990326a8 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,9 @@ Dashboards Hunt ![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png) +Detections +![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/59_detections.png) + PCAP ![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png) From 641899ad562b4fc02ed182396f07459c284c3456 Mon Sep 17 
00:00:00 2001 From: Mike Reeves Date: Mon, 13 May 2024 09:50:14 -0400 Subject: [PATCH 554/777] Backup Suricata for migration and remove advanced from reverselookups --- salt/manager/tools/sbin/soup | 11 +++++++++++ salt/soc/soc_soc.yaml | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index aaa703ba9..f22bdec22 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -583,6 +583,7 @@ up_to_2.4.60() { up_to_2.4.70() { playbook_migration + suricata_idstools_migration toggle_telemetry add_detection_test_pillars INSTALLEDVERSION=2.4.70 @@ -634,6 +635,16 @@ ASSIST_EOF fi } +suricata_idstools_migration() { + #Backup the pillars for idstools + mkdir -p /nsm/backup/detections-migration/idstools + rsync -av /opt/so/saltstack/local/pillar/idstools /nsm/backup/detections-migration/idstools + + #Backup Thresholds + mkdir -p /nsm/backup/detections-migration/suricata + rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata +} + playbook_migration() { # Start SOC Detections migration mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index c908521fa..2a7659384 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -235,7 +235,7 @@ soc: apiTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. global: True - advanced: True + advanced: False forcedType: int webSocketTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server websocket before giving up and reconnecting. 
From 6c71c45ef6283d8c1849e706446168acc43a9dbc Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 13 May 2024 09:55:57 -0400 Subject: [PATCH 555/777] Update soup --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index f22bdec22..b57af160d 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -638,7 +638,7 @@ ASSIST_EOF suricata_idstools_migration() { #Backup the pillars for idstools mkdir -p /nsm/backup/detections-migration/idstools - rsync -av /opt/so/saltstack/local/pillar/idstools /nsm/backup/detections-migration/idstools + rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools #Backup Thresholds mkdir -p /nsm/backup/detections-migration/suricata From 28e40e42b388973ca3eca646458105ce970ab3cb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 13 May 2024 09:58:32 -0400 Subject: [PATCH 556/777] Update soc_soc.yaml --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 2a7659384..c908521fa 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -235,7 +235,7 @@ soc: apiTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. global: True - advanced: False + advanced: True forcedType: int webSocketTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server websocket before giving up and reconnecting. 
From 9d6f6c7893f45f2d830f0d064278334b4e57bae6 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 13 May 2024 10:09:35 -0400 Subject: [PATCH 557/777] Update soup --- salt/manager/tools/sbin/soup | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index b57af160d..8e77fecf0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -639,10 +639,20 @@ suricata_idstools_migration() { #Backup the pillars for idstools mkdir -p /nsm/backup/detections-migration/idstools rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools + if [[ $? -eq 0 ]]; then + echo "IDStools configuration has been backed up." + else + fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up." + fi #Backup Thresholds mkdir -p /nsm/backup/detections-migration/suricata rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata + if [[ $? -eq 0 ]]; then + echo "Suricata thresholds have been backed up." + else + fail "Error: rsync failed to copy the files. Thresholds have not been backed up." 
+ fi } playbook_migration() { From 649f52dac7a039f7f1fa05089d434c087755b026 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 13 May 2024 10:37:56 -0400 Subject: [PATCH 558/777] create_local_directories in soup too --- salt/common/tools/sbin/so-common | 15 +++++++++++++++ salt/manager/tools/sbin/soup | 2 ++ setup/so-functions | 15 --------------- setup/so-setup | 2 +- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index a71d67f81..d6cd4c4e8 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -179,6 +179,21 @@ copy_new_files() { cd /tmp } +create_local_directories() { + info "Creating local pillar and salt directories if needed" + PILLARSALTDIR=$1 + for i in "pillar" "salt"; do + for d in $(find $PILLARSALTDIR/$i -type d); do + suffixdir=${d//$PILLARSALTDIR/} + if [ ! -d "$local_salt_dir/$suffixdir" ]; then + logCmd "mkdir -pv $local_salt_dir$suffixdir" + fi + done + logCmd "chown -R socore:socore $local_salt_dir/$i" + done + +} + disable_fastestmirror() { sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf } diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index aaa703ba9..5a8f70771 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1026,6 +1026,7 @@ main() { backup_old_states_pillars fi copy_new_files + create_local_directories "/opt/so/saltstack/default" apply_hotfix echo "Hotfix applied" update_version @@ -1092,6 +1093,7 @@ main() { echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR." 
copy_new_files echo "" + create_local_directories "/opt/so/saltstack/default" update_version echo "" diff --git a/setup/so-functions b/setup/so-functions index 3cdaee9ca..d91161203 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -762,21 +762,6 @@ copy_salt_master_config() { logCmd "systemctl restart salt-master" } -create_local_directories() { - info "Creating local pillar and salt directories" - PILLARSALTDIR=${SCRIPTDIR::-5} - for i in "pillar" "salt"; do - for d in $(find $PILLARSALTDIR/$i -type d); do - suffixdir=${d//$PILLARSALTDIR/} - if [ ! -d "$local_salt_dir/$suffixdir" ]; then - logCmd "mkdir -pv $local_salt_dir$suffixdir" - fi - done - logCmd "chown -R socore:socore $local_salt_dir/$i" - done - -} - create_local_nids_rules() { title "Create a local.rules file so it doesn't get removed on updates" logCmd "mkdir -p /opt/so/saltstack/local/salt/idstools" diff --git a/setup/so-setup b/setup/so-setup index cb535469b..0387da0de 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -672,7 +672,7 @@ if ! 
[[ -f $install_opt_file ]]; then # Add the socore user add_socore_user_manager - create_local_directories + create_local_directories ${SCRIPTDIR::-5} setup_salt_master_dirs create_manager_pillars From 2643da978b282a8c55d7ae8cdd42e117caeeb859 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 13 May 2024 11:51:10 -0400 Subject: [PATCH 559/777] those functions in so-functions --- salt/common/tools/sbin/so-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index d6cd4c4e8..77e80ee8c 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -180,13 +180,13 @@ copy_new_files() { } create_local_directories() { - info "Creating local pillar and salt directories if needed" + echo "Creating local pillar and salt directories if needed" PILLARSALTDIR=$1 for i in "pillar" "salt"; do for d in $(find $PILLARSALTDIR/$i -type d); do suffixdir=${d//$PILLARSALTDIR/} if [ ! -d "$local_salt_dir/$suffixdir" ]; then - logCmd "mkdir -pv $local_salt_dir$suffixdir" + mkdir -pv $local_salt_dir$suffixdir fi done logCmd "chown -R socore:socore $local_salt_dir/$i" From eb038582308021c606490449aea8caf22a7300b7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 13 May 2024 12:44:57 -0400 Subject: [PATCH 560/777] missed one --- salt/common/tools/sbin/so-common | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 77e80ee8c..0ed7a662d 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -189,9 +189,8 @@ create_local_directories() { mkdir -pv $local_salt_dir$suffixdir fi done - logCmd "chown -R socore:socore $local_salt_dir/$i" + chown -R socore:socore $local_salt_dir/$i done - } disable_fastestmirror() { From 26b5a3991252753226e8b64528d9e7ab6fe3354d Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 13 May 2024 12:59:17 -0400 Subject: [PATCH 
561/777] Change index to detections.alerts --- salt/elastalert/files/modules/so/securityonion-es.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elastalert/files/modules/so/securityonion-es.py b/salt/elastalert/files/modules/so/securityonion-es.py index 0a82bdce6..d9bb8009e 100644 --- a/salt/elastalert/files/modules/so/securityonion-es.py +++ b/salt/elastalert/files/modules/so/securityonion-es.py @@ -56,8 +56,8 @@ class SecurityOnionESAlerter(Alerter): "event_data": match, "@timestamp": timestamp } - url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/" + url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/" requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds) def get_info(self): - return {'type': 'SecurityOnionESAlerter'} \ No newline at end of file + return {'type': 'SecurityOnionESAlerter'} From c4c38f58cb145e2be9723c382cd8bf611eb2741e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 13 May 2024 13:13:57 -0400 Subject: [PATCH 562/777] Update descriptions --- salt/idstools/soc_idstools.yaml | 2 +- salt/soc/soc_soc.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index 698a7a1fc..993abfd51 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -9,7 +9,7 @@ idstools: forcedType: string helpLink: rules.html ruleset: - description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' 
+ description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True regex: ETPRO\b|ETOPEN\b helpLink: rules.html diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index c908521fa..fa8d80bc8 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -39,7 +39,7 @@ soc: helpLink: soc-customization.html sigma_final_pipeline__yaml: title: Final Sigma Pipeline - description: Final Processing Pipeline for Sigma Rules (future use, not yet complete) + description: Final Processing Pipeline for Sigma Rules. syntax: yaml file: True global: True @@ -115,7 +115,7 @@ soc: helpLink: sigma.html airgap: *eerulesRepos sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). 
Once you have changed the ruleset here, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True advanced: False helpLink: sigma.html @@ -255,7 +255,7 @@ soc: description: Set to true to enable case management in SOC. global: True detectionsEnabled: - description: Set to true to enable the Detections module in SOC. (future use, not yet complete) + description: Set to true to enable the Detections module in SOC. global: True inactiveTools: description: List of external tools to remove from the SOC UI. From e430de88d377d6fee6bc551cdc68e9f7b458ff56 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 13 May 2024 13:15:06 -0400 Subject: [PATCH 563/777] Change rule updates to 24h --- salt/soc/defaults.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 8ef0047be..7b33adaa4 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1284,7 +1284,7 @@ soc: so-import: - securityonion-resources+critical - securityonion-resources+high - communityRulesImportFrequencySeconds: 28800 + communityRulesImportFrequencySeconds: 86400 communityRulesImportErrorSeconds: 300 failAfterConsecutiveErrorCount: 10 denyRegex: '' @@ -1353,7 +1353,7 @@ soc: autoEnabledYaraRules: - securityonion-yara autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 28800 + communityRulesImportFrequencySeconds: 86400 communityRulesImportErrorSeconds: 300 failAfterConsecutiveErrorCount: 10 compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py @@ -1373,7 +1373,7 @@ soc: suricataengine: allowRegex: '' autoUpdateEnabled: true - communityRulesImportFrequencySeconds: 28800 
+ communityRulesImportFrequencySeconds: 86400 communityRulesImportErrorSeconds: 300 failAfterConsecutiveErrorCount: 10 communityRulesFile: /nsm/rules/suricata/emerging-all.rules From c8870eae65dc395cf5f2165c4b8772dc41f8cca6 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 13 May 2024 14:23:47 -0400 Subject: [PATCH 564/777] Add detection alerts template --- salt/elasticsearch/defaults.yaml | 62 ++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 156483b03..e54d58c3b 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -3591,6 +3591,68 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-detections_x_alerts: + index_sorting: false + index_template: + composed_of: + - so-data-streams-mappings + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + - so-logs-mappings + - so-logs-settings + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-detections.alerts-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-detections.alerts-so + mapping: + total_fields: + limit: 5001 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 1d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-elastic_agent: index_sorting: false index_template: From d606f259d12b049d620d3f200085b91821995643 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 13 May 2024 14:25:11 -0400 Subject: [PATCH 565/777] Add 
detection alerts --- salt/elasticsearch/soc_elasticsearch.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index cc92493fb..000fd60b7 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -394,6 +394,7 @@ elasticsearch: so-logs-darktrace_x_ai_analyst_alert: *indexSettings so-logs-darktrace_x_model_breach_alert: *indexSettings so-logs-darktrace_x_system_status_alert: *indexSettings + so-logs-detections_x_alerts: *indexSettings so-logs-f5_bigip_x_log: *indexSettings so-logs-fim_x_event: *indexSettings so-logs-fortinet_x_clientendpoint: *indexSettings From 1ef9509aac03a4b3911d9df343a249222056a61c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 13 May 2024 14:34:22 -0400 Subject: [PATCH 566/777] define local_salt_dir --- salt/common/tools/sbin/so-common | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 0ed7a662d..8d4d9f8ab 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -182,6 +182,7 @@ copy_new_files() { create_local_directories() { echo "Creating local pillar and salt directories if needed" PILLARSALTDIR=$1 + local_salt_dir="/opt/so/saltstack/local" for i in "pillar" "salt"; do for d in $(find $PILLARSALTDIR/$i -type d); do suffixdir=${d//$PILLARSALTDIR/} From 13062099b3b5758c40182b00504c18489d88eee9 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 13 May 2024 18:04:16 -0400 Subject: [PATCH 567/777] Remove YARA script update and reference to exclusions --- salt/manager/init.sls | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index d979482ef..ec37f9ff3 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -73,17 +73,6 @@ manager_sbin: - exclude_pat: - "*_test.py" -yara_update_scripts: - file.recurse: - - name: /usr/sbin/ - - source: 
salt://manager/tools/sbin_jinja/ - - user: socore - - group: socore - - file_mode: 755 - - template: jinja - - defaults: - EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} - so-repo-file: file.managed: - name: /opt/so/conf/reposync/repodownload.conf From 5b45c80a62231b6f487e8b4a3f354435bca4f462 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 14 May 2024 10:01:18 -0400 Subject: [PATCH 568/777] FEATURE: Add NetFlow dashboard #13009 --- salt/soc/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 7b33adaa4..b96cabf9d 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1931,6 +1931,9 @@ soc: - name: ICS S7 description: S7 (Siemens) network metadata query: 'tags:s7* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' + - name: NetFlow + description: NetFlow records + query: 'event.module:netflow | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.type | groupby network.transport | groupby network.direction | groupby netflow.type | groupby netflow.exporter.version | groupby observer.ip | groupby source.as.organization.name | groupby source.geo.country_name | groupby destination.as.organization.name | groupby destination.geo.country_name' - name: Firewall description: Firewall logs query: 'observer.type:firewall | groupby event.action | groupby -sankey event.action observer.ingress.interface.name | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' From 67645a662da34ee1931236a868549bdb2a1bc3ef Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 14 May 2024 10:14:16 -0400 Subject: [PATCH 569/777] FEATURE: Add NetFlow dashboard #13009 --- salt/soc/defaults.yaml | 
13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index b96cabf9d..ca64c6b7b 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1258,6 +1258,19 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid + ':netflow:': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - network.type + - network.transport + - network.direction + - netflow.type + - netflow.exporter.version + - observer.ip server: bindAddress: 0.0.0.0:9822 baseUrl: / From 51862e580386f505ddd935a7cb08d9306a0864f7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 14 May 2024 13:08:51 -0400 Subject: [PATCH 570/777] remove idh.services from idh node pillar files --- salt/manager/tools/sbin/so-minion | 6 +----- salt/manager/tools/sbin/soup | 8 +++++++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index e0e892c3d..8b563ef1d 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -201,11 +201,7 @@ function add_idh_to_minion() { "idh:"\ " enabled: True"\ " restrict_management_ip: $IDH_MGTRESTRICT"\ - " services:" >> "$PILLARFILE" - IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES" - for service in ${IDH_SERVICES_ARRAY[@]}; do - echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE" - done + " " >> $PILLARFILE } function add_logstash_to_minion() { diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 39cd07071..0ac938188 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,7 +438,13 @@ post_to_2.4.60() { } post_to_2.4.70() { - echo "Nothing to apply" + echo "Removing idh.services from IDH node pillar files" + for file in /opt/so/saltstack/local/pillar/minions/*_idh.sls; do + if [[ ! 
$file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then + echo "Removing idh.services from: $file" + so-yaml.py remove "$file" idh.services + fi + done POSTVERSION=2.4.70 } From 2dbbe8dec473e928d7f04ee7f537eb85b46b6799 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 15 May 2024 10:07:06 -0400 Subject: [PATCH 571/777] soup_scripts put so-yaml in salt file system. move soup scripts to manager.soup_scripts --- salt/{common => manager}/soup_scripts.sls | 16 ++++++++++++++++ salt/manager/tools/sbin/soup | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) rename salt/{common => manager}/soup_scripts.sls (75%) diff --git a/salt/common/soup_scripts.sls b/salt/manager/soup_scripts.sls similarity index 75% rename from salt/common/soup_scripts.sls rename to salt/manager/soup_scripts.sls index 90ee059a4..898de67ee 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/manager/soup_scripts.sls @@ -1,3 +1,8 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + {% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %} {% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} @@ -15,6 +20,8 @@ remove_common_so-firewall: file.absent: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall +# This section is used to put the scripts in place in the Salt file system +# in case a state run tries to overwrite what we do in the next section. 
copy_so-common_common_tools_sbin: file.copy: - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common @@ -43,6 +50,15 @@ copy_so-firewall_manager_tools_sbin: - force: True - preserve: True +copy_so-yaml_manager_tools_sbin: + file.copy: + - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py + - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py + - force: True + - preserve: True + +# This section is used to put the new script in place so that it can be called during soup. +# It is faster than calling the states that normally manage them to put them in place. copy_so-common_sbin: file.copy: - name: /usr/sbin/so-common diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0ac938188..24b6e5427 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -907,7 +907,7 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null + salt-call state.apply manager.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null # Verify that soup scripts updated as expected get_soup_script_hashes @@ -915,7 +915,7 @@ verify_latest_update_script() { echo "Succesfully updated soup scripts." else echo "There was a problem updating soup scripts. Trying to rerun script update." 
- salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + salt-call state.apply manager.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local fi echo "" From 427b1e4524f3ec0267c5a54d58db48a523b60c9d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 15 May 2024 10:28:02 -0400 Subject: [PATCH 572/777] revert soup_scripts back to common --- salt/{manager => common}/soup_scripts.sls | 0 salt/manager/tools/sbin/soup | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename salt/{manager => common}/soup_scripts.sls (100%) diff --git a/salt/manager/soup_scripts.sls b/salt/common/soup_scripts.sls similarity index 100% rename from salt/manager/soup_scripts.sls rename to salt/common/soup_scripts.sls diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 24b6e5427..0ac938188 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -907,7 +907,7 @@ verify_latest_update_script() { else echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." - salt-call state.apply manager.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null + salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null # Verify that soup scripts updated as expected get_soup_script_hashes @@ -915,7 +915,7 @@ verify_latest_update_script() { echo "Succesfully updated soup scripts." else echo "There was a problem updating soup scripts. Trying to rerun script update." 
- salt-call state.apply manager.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local + salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local fi echo "" From 7345d2c5a67d5d0437d3818e309b95ea87e4f9fb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 15 May 2024 11:16:20 -0400 Subject: [PATCH 573/777] Update enabled.sls --- salt/soc/enabled.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 6cea0c70d..38e1fd3fe 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -44,6 +44,7 @@ so-soc: - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw - /opt/so/conf/soc/queue:/opt/sensoroni/queue:rw - /opt/so/saltstack:/opt/so/saltstack:rw + - /opt/so/conf/soc/migrations:/opt/so/conf/soc/migrations:rw - extra_hosts: {% for node in DOCKER_EXTRA_HOSTS %} {% for hostname, ip in node.items() %} From e3a08478672b9158ef652bf1c257a4a38e0f4d96 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 15 May 2024 11:31:41 -0400 Subject: [PATCH 574/777] Update soup --- salt/manager/tools/sbin/soup | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0ac938188..edd7aec85 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -659,6 +659,10 @@ suricata_idstools_migration() { else fail "Error: rsync failed to copy the files. Thresholds have not been backed up." 
fi + + #Tell SOC to migrate + mkdir -p /opt/so/conf/soc/migrations + echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70 } playbook_migration() { From ea253726a0ae6730e364067ebfa8f7cf854a18f0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 15 May 2024 13:48:32 -0400 Subject: [PATCH 575/777] fix soup --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0ac938188..0e4f5f8c8 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -439,8 +439,8 @@ post_to_2.4.60() { post_to_2.4.70() { echo "Removing idh.services from IDH node pillar files" - for file in /opt/so/saltstack/local/pillar/minions/*_idh.sls; do - if [[ ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then + for file in /opt/so/saltstack/local/pillar/minions/*.sls; do + if [[ $file =~ "_idh.sls" && ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then echo "Removing idh.services from: $file" so-yaml.py remove "$file" idh.services fi From 8803ad401880a30ef108c70abdb9e8b536aa13df Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 15 May 2024 14:05:48 -0400 Subject: [PATCH 576/777] Update enabled.sls --- salt/soc/enabled.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 38e1fd3fe..990bf210c 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -45,6 +45,7 @@ so-soc: - /opt/so/conf/soc/queue:/opt/sensoroni/queue:rw - /opt/so/saltstack:/opt/so/saltstack:rw - /opt/so/conf/soc/migrations:/opt/so/conf/soc/migrations:rw + - /nsm/backup/detections-migration:/nsm/backup/detections-migration:ro - extra_hosts: {% for node in DOCKER_EXTRA_HOSTS %} {% for hostname, ip in node.items() %} From 3a56058f7ff62c33d5c485d7a3934711fb632ef0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 15 May 2024 15:31:31 -0400 Subject: [PATCH 577/777] update description --- 
salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index df722e2ed..14d914df8 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,7 +438,7 @@ post_to_2.4.60() { } post_to_2.4.70() { - echo "Removing idh.services from IDH node pillar files" + echo "Removing idh.services from any existing IDH node pillar files" for file in /opt/so/saltstack/local/pillar/minions/*.sls; do if [[ $file =~ "_idh.sls" && ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then echo "Removing idh.services from: $file" From 6af030848246048f60b75c3b6f3216b184388402 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 15 May 2024 16:26:44 -0400 Subject: [PATCH 578/777] add a newline --- .../tools/sbin_jinja/so-elastic-agent-gen-installers | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers index ff46a3e07..1e4222cae 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers @@ -72,5 +72,5 @@ do printf "\n### $GOOS/$GOARCH Installer Generated...\n" done -printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace" +printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n" rm -rf /nsm/elastic-agent-workspace From b4aec9a9d02543338959b79120cd18f941371db2 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 15 May 2024 16:29:21 -0400 Subject: [PATCH 579/777] alphabetical order --- salt/soc/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ca64c6b7b..1f9fe686b 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2182,9 +2182,9 @@ soc: manualSync: customEnabled: false labels: - - 
Suricata - - Strelka - ElastAlert + - Strelka + - Suricata eventFields: default: - so_detection.title From 8076ea0e0aae1562f31b7d50c7a44f0665e53090 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 15 May 2024 16:34:05 -0400 Subject: [PATCH 580/777] add another space --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 14d914df8..d9d8c298f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,7 +438,7 @@ post_to_2.4.60() { } post_to_2.4.70() { - echo "Removing idh.services from any existing IDH node pillar files" + printf "\nRemoving idh.services from any existing IDH node pillar files\n" for file in /opt/so/saltstack/local/pillar/minions/*.sls; do if [[ $file =~ "_idh.sls" && ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then echo "Removing idh.services from: $file" From ab9ec2ec6b0002da76c671dfc8e2202aa64d01b2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 15 May 2024 18:04:01 -0400 Subject: [PATCH 581/777] Update soup --- salt/manager/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index d9d8c298f..525fce3f6 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -663,6 +663,7 @@ suricata_idstools_migration() { #Tell SOC to migrate mkdir -p /opt/so/conf/soc/migrations echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70 + chown -R socore:socore /opt/so/conf/soc/migrations } playbook_migration() { From 477181036112e7d8929e8cf0f5b4a835c2d4cad1 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 15 May 2024 19:10:50 -0400 Subject: [PATCH 582/777] exclude detect-parse errors --- salt/common/tools/sbin/so-log-check | 1 + salt/suricata/soc_suricata.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check 
b/salt/common/tools/sbin/so-log-check index 67eff6d54..5bee4d254 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -202,6 +202,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp. EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule fi RESULT=0 diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index 75ad1e476..e157ff852 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -12,7 +12,7 @@ suricata: title: SIDS helpLink: suricata.html readonlyUi: True - advanced: true + advanced: True classification: classification__config: description: Classifications config file. From 9796354b4841e4408b5bc349b3a643d025e72a8e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 16 May 2024 14:27:32 -0400 Subject: [PATCH 583/777] dont merge policy from global_overrides if not defined in default index_settings --- salt/elasticsearch/template.map.jinja | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index f5a124a9a..8d40d9e4d 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -19,6 +19,12 @@ {% set ES_INDEX_SETTINGS = {} %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %} {% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %} +{# if policy isn't defined in the original index settings, then dont merge policy from the global_overrides #} +{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM manged indices #} +{% if not 
ES_INDEX_SETTINGS_ORIG[index].policy is defined %} +{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %} +{% endif %} + {% if settings.index_template is defined %} {% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %} {% do settings.index_template.template.settings.index.pop('sort') %} From 9d4668f4d32205ffabd7fc0d96708decdea6ff21 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 16 May 2024 15:45:55 -0400 Subject: [PATCH 584/777] Revert "dont merge policy from global_overrides if not defined in default index_settings" --- salt/elasticsearch/template.map.jinja | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 8d40d9e4d..f5a124a9a 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -19,12 +19,6 @@ {% set ES_INDEX_SETTINGS = {} %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %} {% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %} -{# if policy isn't defined in the original index settings, then dont merge policy from the global_overrides #} -{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM manged indices #} -{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined %} -{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %} -{% endif %} - {% if settings.index_template is defined %} {% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %} {% do settings.index_template.template.settings.index.pop('sort') %} From b54632080ee4962985a2b50fd98bc5ae55c384fa Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 16 May 2024 16:04:17 -0400 Subject: [PATCH 585/777] check if exists in override before popping --- 
salt/elasticsearch/template.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 8d40d9e4d..4a90a4f54 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -21,7 +21,7 @@ {% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %} {# if policy isn't defined in the original index settings, then dont merge policy from the global_overrides #} {# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM manged indices #} -{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined %} +{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %} {% endif %} From cc6cb346e796569d5a4d0166ae43d3b43113e751 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 16 May 2024 16:31:45 -0400 Subject: [PATCH 586/777] fix issue/13030 --- salt/elasticsearch/template.map.jinja | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 4a90a4f54..b59c291a4 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -2,11 +2,9 @@ {% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %} {% set PILLAR_GLOBAL_OVERRIDES = {} %} -{% if salt['pillar.get']('elasticsearch:index_settings') is defined %} -{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings') %} -{% if ES_INDEX_PILLAR.global_overrides is defined %} -{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %} -{% endif %} +{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings', {}) %} +{% if ES_INDEX_PILLAR.global_overrides is defined %} +{% set PILLAR_GLOBAL_OVERRIDES = 
ES_INDEX_PILLAR.pop('global_overrides') %} {% endif %} {% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %} From 34a5985311bd64ccb30aab66eac4eaf2a7694262 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 16 May 2024 21:14:57 -0400 Subject: [PATCH 587/777] Create tpm enrollment script Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/common/tools/sbin/so-luks-tpm-enroll | 60 +++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 salt/common/tools/sbin/so-luks-tpm-enroll diff --git a/salt/common/tools/sbin/so-luks-tpm-enroll b/salt/common/tools/sbin/so-luks-tpm-enroll new file mode 100644 index 000000000..f08fe5e0b --- /dev/null +++ b/salt/common/tools/sbin/so-luks-tpm-enroll @@ -0,0 +1,60 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0." + +# This script is intended to be used in the case the ISO install did not properly setup TPM decrypt for LUKS partitions at boot. + +check_for_tpm() { + echo -n "Checking for TPM: " + if [ -d /sys/class/tpm/tpm0 ]; then + echo -e "tpm0 found." 
+ TPM="yes" + # Check if TPM is using sha1 or sha256 + if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then + echo -e "TPM is using sha1.\n" + TPM_PCR="sha1" + elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then + echo -e "TPM is using sha256.\n" + TPM_PCR="sha256" + fi + else + echo -e "No TPM found.\n" + exit 1 + fi +} + +check_for_luks_partitions() { + echo "Checking for LUKS partitions" + for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do + echo "Found LUKS partition: $part" + LUKS_PARTITIONS+=("$part") + done + if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then + echo -e "No LUKS partitions found.\n" + exit 1 + fi + echo "" +} + +enroll_tpm_in_luks() { + read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE + echo "" + for part in "${LUKS_PARTITIONS[@]}"; do + echo "Enrolling TPM for LUKS device: /dev/$part" + if [ "$TPM_PCR" == "sha1" ]; then + clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE + elif [ "$TPM_PCR" == "sha256" ]; then + clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE + fi + done + echo "Running dracut" + dracut -fv --no-kernel +} + +check_for_tpm +check_for_luks_partitions +enroll_tpm_in_luks +echo -e "\nTPM enrollment complete. 
Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n" \ No newline at end of file From 1c4d36760afd9be9dcbdf47d9e1e62bc0edaef9d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 17 May 2024 14:49:39 -0400 Subject: [PATCH 588/777] add support for custom alerters --- salt/elastalert/defaults.yaml | 1 + salt/elastalert/map.jinja | 5 +++++ salt/elastalert/soc_elastalert.yaml | 8 ++++++++ salt/soc/soc_soc.yaml | 7 +++++++ 4 files changed, 21 insertions(+) diff --git a/salt/elastalert/defaults.yaml b/salt/elastalert/defaults.yaml index a01c80952..393932992 100644 --- a/salt/elastalert/defaults.yaml +++ b/salt/elastalert/defaults.yaml @@ -1,5 +1,6 @@ elastalert: enabled: False + alerter_parameters: "" config: rules_folder: /opt/elastalert/rules/ scan_subdirectories: true diff --git a/salt/elastalert/map.jinja b/salt/elastalert/map.jinja index cc395d8ee..3db17d32b 100644 --- a/salt/elastalert/map.jinja +++ b/salt/elastalert/map.jinja @@ -13,3 +13,8 @@ {% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %} {% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %} + +{% set params = ELASTALERTMERGED.alerter_parameters | load_yaml %} +{% if params != None %} + {% do ELASTALERTMERGED.config.update(params) %} +{% endif %} diff --git a/salt/elastalert/soc_elastalert.yaml b/salt/elastalert/soc_elastalert.yaml index cde09b83e..eec3f3866 100644 --- a/salt/elastalert/soc_elastalert.yaml +++ b/salt/elastalert/soc_elastalert.yaml @@ -2,6 +2,14 @@ elastalert: enabled: description: You can enable or disable Elastalert. helpLink: elastalert.html + alerter_parameters: + title: Alerter Parameters + description: Custom configuration parameters for additional, optional alerters that can be enabled for all Sigma rules. 
Filter for 'Additional Alerters' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. + global: True + multiline: True + syntax: yaml + helpLink: elastalert.html + forcedType: string config: disable_rules_on_error: description: Disable rules on failure. diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index fa8d80bc8..bc1c49185 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -83,6 +83,13 @@ soc: advanced: True modules: elastalertengine: + additionalAlerters: + title: Additional Alerters + description: Specify additional alerters to enable for all Sigma rules, one alerter name per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. Note that the configuration parameters for these alerters must be provided in the ElastAlert configuration section. Filter for 'Alerter Parameters' to find this related setting. + global: True + helpLink: sigma.html + forcedType: "[]string" + multiline: True allowRegex: description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.' 
global: True From d9edff38df40ebe31618c311049a455f4feb8798 Mon Sep 17 00:00:00 2001 From: weslambert Date: Fri, 17 May 2024 16:10:10 -0400 Subject: [PATCH 589/777] Create compile report for SOC integrity check --- salt/strelka/compile_yara/compile_yara.py | 41 +++++++++++++++++++---- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index b6fa95899..cac60ad60 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -3,10 +3,13 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +import argparse +import glob +import hashlib +import json import os import yara -import glob -import json +from datetime import datetime from concurrent.futures import ThreadPoolExecutor def check_syntax(rule_file): @@ -25,19 +28,24 @@ def compile_yara_rules(rules_dir): files_to_compile = {} removed_count = 0 success_count = 0 - + # Use ThreadPoolExecutor to parallelize syntax checks with ThreadPoolExecutor() as executor: results = executor.map(check_syntax, rule_files) - + # Collect yara files and prepare for batch compilation + ts = str(datetime.utcnow().isoformat()) + failure_ids = [] + success_ids = [] for success, rule_file, error_message in results: + rule_id = os.path.splitext(os.path.basename(rule_file))[0] if success: files_to_compile[os.path.basename(rule_file)] = rule_file success_count += 1 + success_ids.append(rule_id) else: + failure_ids.append(rule_id) # Extract just the UUID from the rule file name - rule_id = os.path.splitext(os.path.basename(rule_file))[0] log_entry = { "event_module": "soc", "event_dataset": "soc.detections", @@ -55,16 +63,37 @@ def compile_yara_rules(rules_dir): removed_count += 1 # Compile all remaining valid rules into a single file + compiled_sha256="" if files_to_compile: compiled_rules = yara.compile(filepaths=files_to_compile) 
compiled_rules.save(compiled_rules_path) print(f"All remaining rules compiled and saved into {compiled_rules_path}") + # Hash file + with open(compiled_rules_path, 'rb') as hash_file: + compiled_sha256=hashlib.sha256(hash_file.read()).hexdigest() # Remove the rules.compiled if there aren't any files to be compiled else: if os.path.exists(compiled_rules_path): os.remove(compiled_rules_path) + # Create compilation report + compilation_report = { + "timestamp": ts, + "compiled_sha256": compiled_sha256, + "failure": failure_ids, + "success": success_ids + } + + # Write total + with open('/opt/sensoroni/logs/detections_yara_compilation-total.log', 'w+') as report_file: + json.dump(compilation_report, report_file) + # Print summary of compilation results print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") -compile_yara_rules("/opt/sensoroni/yara/rules/") +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Compile YARA rules from the specified directory") + parser.add_argument("rules_dir", help="Directory containing YARA rules to compile") + args = parser.parse_args() + +compile_yara_rules(args.rules_dir) From 0cc57fc24092145839fbee701867ef3132f6add1 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 17 May 2024 15:47:23 -0600 Subject: [PATCH 590/777] Change Compilation Report Path Move compilation report path to /opt/so/state and mount that foulder in SOC --- salt/soc/enabled.sls | 1 + salt/strelka/compile_yara/compile_yara.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 990bf210c..4d4b5f6fd 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -46,6 +46,7 @@ so-soc: - /opt/so/saltstack:/opt/so/saltstack:rw - /opt/so/conf/soc/migrations:/opt/so/conf/soc/migrations:rw - /nsm/backup/detections-migration:/nsm/backup/detections-migration:ro + - /opt/so/state:/opt/so/state:rw - extra_hosts: {% for node in 
DOCKER_EXTRA_HOSTS %} {% for hostname, ip in node.items() %} diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index cac60ad60..09e3f4680 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -85,7 +85,7 @@ def compile_yara_rules(rules_dir): } # Write total - with open('/opt/sensoroni/logs/detections_yara_compilation-total.log', 'w+') as report_file: + with open('/opt/so/state/detections_yara_compilation-total.log', 'w+') as report_file: json.dump(compilation_report, report_file) # Print summary of compilation results From fcc72a4f4ec9d31843bd7e620223735cc649a222 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Mon, 20 May 2024 11:23:25 -0600 Subject: [PATCH 591/777] Add Default IntegrityCheck Frequency Values --- salt/soc/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 1f9fe686b..0113f22cc 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1305,6 +1305,7 @@ soc: reposFolder: /opt/sensoroni/sigma/repos rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state + integrityCheckFrequencySeconds: 600 rulesRepos: default: - repo: https://github.com/Security-Onion-Solutions/securityonion-resources @@ -1383,6 +1384,7 @@ soc: community: true yaraRulesFolder: /opt/sensoroni/yara/rules stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state + integrityCheckFrequencySeconds: 600 suricataengine: allowRegex: '' autoUpdateEnabled: true @@ -1393,6 +1395,7 @@ soc: denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state + integrityCheckFrequencySeconds: 600 client: enableReverseLookup: false docsUrl: /docs/ From 6fac6eebceeacc74b6b595e9c7eedf26fa907305 Mon Sep 17 00:00:00 2001 From: reyesj2 
<94730068+reyesj2@users.noreply.github.com> Date: Mon, 20 May 2024 14:37:54 -0400 Subject: [PATCH 592/777] Helper script for enrolling tpm into luks Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../{so-luks-tpm-enroll => so-luks-tpm-regen} | 46 +++++++++++++++++-- 1 file changed, 42 insertions(+), 4 deletions(-) rename salt/common/tools/sbin/{so-luks-tpm-enroll => so-luks-tpm-regen} (67%) diff --git a/salt/common/tools/sbin/so-luks-tpm-enroll b/salt/common/tools/sbin/so-luks-tpm-regen similarity index 67% rename from salt/common/tools/sbin/so-luks-tpm-enroll rename to salt/common/tools/sbin/so-luks-tpm-regen index f08fe5e0b..50058b504 100644 --- a/salt/common/tools/sbin/so-luks-tpm-enroll +++ b/salt/common/tools/sbin/so-luks-tpm-regen @@ -5,7 +5,33 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0." +set -e # This script is intended to be used in the case the ISO install did not properly setup TPM decrypt for LUKS partitions at boot. +if [ -z $NOROOT ]; then + # Check for prerequisites + if [ "$(id -u)" -ne 0 ]; then + echo "This script must be run using sudo!" + exit 1 + fi +fi +ENROLL_TPM=N + +while [[ $# -gt 0 ]]; do + case $1 in + --enroll-tpm) + ENROLL_TPM=Y + ;; + *) + echo "Usage: $0 [options]" + echo "" + echo "where options are:" + echo " --enroll-tpm for when TPM enrollment was not selected during ISO install." + echo "" + exit 1 + ;; + esac + shift +done check_for_tpm() { echo -n "Checking for TPM: " @@ -50,11 +76,23 @@ enroll_tpm_in_luks() { clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE fi done - echo "Running dracut" - dracut -fv --no-kernel + } + +regenerate_tpm_enrollment_token() { + for part in "${LUKS_PARTITIONS[@]}"; do + clevis luks regen -d /dev/$part -s 1 -q + done } check_for_tpm check_for_luks_partitions -enroll_tpm_in_luks -echo -e "\nTPM enrollment complete. 
Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n" \ No newline at end of file + +if [[ $ENROLL_TPM == "Y" ]]; then + enroll_tpm_in_luks +else + regenerate_tpm_enrollment_token +fi + +echo "Running dracut" +dracut -fv +echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n" \ No newline at end of file From 026023fd0a2c1643b5972d2198391c33972ba602 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Mon, 20 May 2024 14:35:11 -0600 Subject: [PATCH 593/777] Annotate integrityCheckFrequencySeconds per det engine --- salt/soc/soc_soc.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index bc1c49185..1e6a915b0 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -113,6 +113,9 @@ soc: global: True advanced: True helpLink: sigma.html + integrityCheckFrequencySeconds: + description: 'How often the ElastAlert integrity checker runs (in seconds). This verifies the integrity of deployed rules.' + global: True rulesRepos: default: &eerulesRepos description: "Custom Git repos to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." @@ -211,6 +214,9 @@ soc: global: True advanced: True helpLink: yara.html + integrityCheckFrequencySeconds: + description: 'How often the Strelka integrity checker runs (in seconds). This verifies the integrity of deployed rules.' + global: True rulesRepos: default: &serulesRepos description: "Custom Git repos to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." 
@@ -235,6 +241,9 @@ soc: global: True advanced: True helpLink: suricata.html + integrityCheckFrequencySeconds: + description: 'How often the Suricata integrity checker runs (in seconds). This verifies the integrity of deployed rules.' + global: True client: enableReverseLookup: description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. From 6e97c39f5886c4e6816fd82c2472961f775c4a30 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Mon, 20 May 2024 14:52:05 -0600 Subject: [PATCH 594/777] Marked as Advanced --- salt/soc/soc_soc.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 1e6a915b0..0cbb99e62 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -116,6 +116,7 @@ soc: integrityCheckFrequencySeconds: description: 'How often the ElastAlert integrity checker runs (in seconds). This verifies the integrity of deployed rules.' global: True + advanced: True rulesRepos: default: &eerulesRepos description: "Custom Git repos to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." @@ -217,6 +218,7 @@ soc: integrityCheckFrequencySeconds: description: 'How often the Strelka integrity checker runs (in seconds). This verifies the integrity of deployed rules.' global: True + advanced: True rulesRepos: default: &serulesRepos description: "Custom Git repos to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." @@ -244,6 +246,7 @@ soc: integrityCheckFrequencySeconds: description: 'How often the Suricata integrity checker runs (in seconds). This verifies the integrity of deployed rules.' 
global: True + advanced: True client: enableReverseLookup: description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. From 6b2219b7f26d5244f9f0285c90400769761e31c2 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 May 2024 18:52:37 -0400 Subject: [PATCH 595/777] elastalert settings --- salt/elastalert/config.sls | 29 ++++++++++++++++ salt/elastalert/defaults.yaml | 2 +- salt/elastalert/enabled.sls | 2 ++ salt/elastalert/map.jinja | 21 ++++++++++-- salt/elastalert/soc_elastalert.yaml | 51 ++++++++++++++++++++++++++++- salt/soc/soc_soc.yaml | 2 +- salt/stig/soc_stig.yaml | 2 +- 7 files changed, 102 insertions(+), 7 deletions(-) diff --git a/salt/elastalert/config.sls b/salt/elastalert/config.sls index 252aa83c0..1251c9d19 100644 --- a/salt/elastalert/config.sls +++ b/salt/elastalert/config.sls @@ -82,6 +82,35 @@ elastasomodulesync: - group: 933 - makedirs: True +elastacustomdir: + file.directory: + - name: /opt/so/conf/elastalert/custom + - user: 933 + - group: 933 + - makedirs: True + +elastacustomsync: + file.recurse: + - name: /opt/so/conf/elastalert/custom + - source: salt://elastalert/files/custom + - user: 933 + - group: 933 + - makedirs: True + - template: jinja + - mode: 660 + - context: + elastalert: {{ ELASTALERTMERGED }} + - show_changes: False + +elastapredefinedsync: + file.recurse: + - name: /opt/so/conf/elastalert/predefined + - source: salt://elastalert/files/predefined + - user: 933 + - group: 933 + - makedirs: True + - show_changes: False + elastaconf: file.managed: - name: /opt/so/conf/elastalert/elastalert_config.yaml diff --git a/salt/elastalert/defaults.yaml b/salt/elastalert/defaults.yaml index 393932992..8021533ab 100644 --- a/salt/elastalert/defaults.yaml +++ b/salt/elastalert/defaults.yaml @@ -40,4 +40,4 @@ elastalert: level: INFO handlers: - file - propagate: false + propagate: False \ No newline at end of file diff --git a/salt/elastalert/enabled.sls b/salt/elastalert/enabled.sls index 
e4b3642db..6a1ff1440 100644 --- a/salt/elastalert/enabled.sls +++ b/salt/elastalert/enabled.sls @@ -30,6 +30,8 @@ so-elastalert: - /opt/so/rules/elastalert:/opt/elastalert/rules/:ro - /opt/so/log/elastalert:/var/log/elastalert:rw - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro + - /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro + - /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro {% if DOCKER.containers['so-elastalert'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %} diff --git a/salt/elastalert/map.jinja b/salt/elastalert/map.jinja index 3db17d32b..8d4e65652 100644 --- a/salt/elastalert/map.jinja +++ b/salt/elastalert/map.jinja @@ -14,7 +14,22 @@ {% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %} -{% set params = ELASTALERTMERGED.alerter_parameters | load_yaml %} -{% if params != None %} - {% do ELASTALERTMERGED.config.update(params) %} +{% if 'ntf' in salt['pillar.get']('features', []) %} + {% set params = ELASTALERTMERGED.alerter_parameters | load_yaml %} + {% if params != None %} + {% do ELASTALERTMERGED.config.update(params) %} + {% endif %} + + {% if ELASTALERTMERGED.smtp_user | length > 0 %} + {% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %} + {% endif %} + + {% if ELASTALERTMERGED.smtp_user | length > 0 %} + {% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %} + {% endif %} + + {% if ELASTALERTMERGED.jira_user | length > 0 or ELASTALERTMERGED.jira_key | length > 0 %} + {% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %} + {% endif %} + {% endif %} diff --git a/salt/elastalert/soc_elastalert.yaml b/salt/elastalert/soc_elastalert.yaml index eec3f3866..81df0541f 100644 --- 
a/salt/elastalert/soc_elastalert.yaml +++ b/salt/elastalert/soc_elastalert.yaml @@ -4,12 +4,61 @@ elastalert: helpLink: elastalert.html alerter_parameters: title: Alerter Parameters - description: Custom configuration parameters for additional, optional alerters that can be enabled for all Sigma rules. Filter for 'Additional Alerters' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. + description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key. global: True multiline: True syntax: yaml helpLink: elastalert.html forcedType: string + jira_api_key: + title: Jira API Key + description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + jira_pass: + title: Jira Password + description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + jira_user: + title: Jira Username + description: Optional configuration parameter for Jira username. 
Requires a valid Security Onion license key. + global: True + helpLink: elastalert.html + forcedType: string + smtp_pass: + title: SMTP Password + description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + smtp_user: + title: SMTP Username + description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key. + global: True + helpLink: elastalert.html + forcedType: string + opsgenie_key: + title: OpsGenie API Key + description: Optional configuration parameter for OpsGenie API Key. Requires a valid Security Onion license key. + global: True + sensitive: True + helpLink: elastalert.html + forcedType: string + files: + custom: + filename__ext: + title: Custom Parameter File + description: Optional configuration file that can be used to specify custom file contents, such as a SMTP certificate file. When used, the corresponding parameter must be set to this setting's filename.ext path inside the custom subdirectory. For example, if specifying the SMTP cert file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting for this certificate to be enabled, and assumes this duplicated setting has been named smtp__crt. Note that double underscores will be replaced with a period in the filename. + global: True + duplicating: True + file: True + helpLink: elastalert.html config: disable_rules_on_error: description: Disable rules on failure. diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index bc1c49185..7367c030d 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -85,7 +85,7 @@ soc: elastalertengine: additionalAlerters: title: Additional Alerters - description: Specify additional alerters to enable for all Sigma rules, one alerter name per line. 
Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. Note that the configuration parameters for these alerters must be provided in the ElastAlert configuration section. Filter for 'Alerter Parameters' to find this related setting. + description: Specify additional alerters to enable for all Sigma rules, one alerter name per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. Note that the configuration parameters for these alerters must be provided in the ElastAlert configuration section. Filter for 'Alerter' to find this related setting. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key. global: True helpLink: sigma.html forcedType: "[]string" diff --git a/salt/stig/soc_stig.yaml b/salt/stig/soc_stig.yaml index 1fb030c31..597aab809 100644 --- a/salt/stig/soc_stig.yaml +++ b/salt/stig/soc_stig.yaml @@ -1,6 +1,6 @@ stig: enabled: - description: You can enable or disable the application of STIGS using oscap. Note that the actions performed by OSCAP are not automatically reversible. + description: You can enable or disable the application of STIGS using oscap. Note that the actions performed by OSCAP are not automatically reversible. Requires a valid Security Onion license key. 
forcedType: bool advanced: True run_interval: From c594168b650f83fb015b7d6cb8dd6180ba225294 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 May 2024 19:05:43 -0400 Subject: [PATCH 596/777] elastalert settings --- salt/elastalert/config.sls | 3 ++- salt/elastalert/files/custom/placeholder | 1 + salt/elastalert/files/predefined/jira_auth.yaml | 6 ++++++ salt/elastalert/files/predefined/smtp_auth.yaml | 2 ++ 4 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 salt/elastalert/files/custom/placeholder create mode 100644 salt/elastalert/files/predefined/jira_auth.yaml create mode 100644 salt/elastalert/files/predefined/smtp_auth.yaml diff --git a/salt/elastalert/config.sls b/salt/elastalert/config.sls index 1251c9d19..0583cadfd 100644 --- a/salt/elastalert/config.sls +++ b/salt/elastalert/config.sls @@ -97,7 +97,7 @@ elastacustomsync: - group: 933 - makedirs: True - template: jinja - - mode: 660 + - file_mode: 660 - context: elastalert: {{ ELASTALERTMERGED }} - show_changes: False @@ -109,6 +109,7 @@ elastapredefinedsync: - user: 933 - group: 933 - makedirs: True + - file_mode: 660 - show_changes: False elastaconf: diff --git a/salt/elastalert/files/custom/placeholder b/salt/elastalert/files/custom/placeholder new file mode 100644 index 000000000..42e4ae4f0 --- /dev/null +++ b/salt/elastalert/files/custom/placeholder @@ -0,0 +1 @@ +THIS IS A PLACEHOLDER FILE \ No newline at end of file diff --git a/salt/elastalert/files/predefined/jira_auth.yaml b/salt/elastalert/files/predefined/jira_auth.yaml new file mode 100644 index 000000000..3f537c92b --- /dev/null +++ b/salt/elastalert/files/predefined/jira_auth.yaml @@ -0,0 +1,6 @@ +{% if elastalert.jira_user | length > 0 %} +user: {{ elastalert.jira_user }} +password: {{ elastalert.jira_pass }} +{% else %} +apikey: {{ elastalert.jira_key }} +{% endif %} \ No newline at end of file diff --git a/salt/elastalert/files/predefined/smtp_auth.yaml b/salt/elastalert/files/predefined/smtp_auth.yaml new file mode 
100644 index 000000000..483dd6810 --- /dev/null +++ b/salt/elastalert/files/predefined/smtp_auth.yaml @@ -0,0 +1,2 @@ +user: {{ elastalert.smtp_user }} +password: {{ elastalert.smtp_pass }} \ No newline at end of file From 8a3061fe3e2369a61e7d6c3a7b12752ac9a51528 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 May 2024 19:36:06 -0400 Subject: [PATCH 597/777] elastalert settings --- .../files/predefined/{jira_auth.yaml => jira_auth.yaml.jinja} | 0 .../files/predefined/{smtp_auth.yaml => smtp_auth.yaml.jinja} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename salt/elastalert/files/predefined/{jira_auth.yaml => jira_auth.yaml.jinja} (100%) rename salt/elastalert/files/predefined/{smtp_auth.yaml => smtp_auth.yaml.jinja} (100%) diff --git a/salt/elastalert/files/predefined/jira_auth.yaml b/salt/elastalert/files/predefined/jira_auth.yaml.jinja similarity index 100% rename from salt/elastalert/files/predefined/jira_auth.yaml rename to salt/elastalert/files/predefined/jira_auth.yaml.jinja diff --git a/salt/elastalert/files/predefined/smtp_auth.yaml b/salt/elastalert/files/predefined/smtp_auth.yaml.jinja similarity index 100% rename from salt/elastalert/files/predefined/smtp_auth.yaml rename to salt/elastalert/files/predefined/smtp_auth.yaml.jinja From e2d0b8f4c7caa825de0ae494ebad01b5d2454f6d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 May 2024 19:38:36 -0400 Subject: [PATCH 598/777] elastalert settings --- salt/elastalert/config.sls | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/elastalert/config.sls b/salt/elastalert/config.sls index 0583cadfd..c435a212b 100644 --- a/salt/elastalert/config.sls +++ b/salt/elastalert/config.sls @@ -109,7 +109,10 @@ elastapredefinedsync: - user: 933 - group: 933 - makedirs: True + - template: jinja - file_mode: 660 + - context: + elastalert: {{ ELASTALERTMERGED }} - show_changes: False elastaconf: From f8ce039065e970dfce0a8fc47632b6395e9e1c65 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 
May 2024 19:58:12 -0400 Subject: [PATCH 599/777] elastalert settings --- salt/elastalert/files/predefined/jira_auth.yaml.jinja | 4 ++-- salt/elastalert/files/predefined/smtp_auth.yaml.jinja | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/elastalert/files/predefined/jira_auth.yaml.jinja b/salt/elastalert/files/predefined/jira_auth.yaml.jinja index 3f537c92b..8ae240157 100644 --- a/salt/elastalert/files/predefined/jira_auth.yaml.jinja +++ b/salt/elastalert/files/predefined/jira_auth.yaml.jinja @@ -1,6 +1,6 @@ -{% if elastalert.jira_user | length > 0 %} +{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %} user: {{ elastalert.jira_user }} password: {{ elastalert.jira_pass }} {% else %} -apikey: {{ elastalert.jira_key }} +apikey: {{ elastalert.get('jira_key', '') }} {% endif %} \ No newline at end of file diff --git a/salt/elastalert/files/predefined/smtp_auth.yaml.jinja b/salt/elastalert/files/predefined/smtp_auth.yaml.jinja index 483dd6810..0d488dd5c 100644 --- a/salt/elastalert/files/predefined/smtp_auth.yaml.jinja +++ b/salt/elastalert/files/predefined/smtp_auth.yaml.jinja @@ -1,2 +1,2 @@ -user: {{ elastalert.smtp_user }} -password: {{ elastalert.smtp_pass }} \ No newline at end of file +user: {{ elastalert.get('smtp_user', '') }} +password: {{ elastalert.get('smtp_pass', '') }} \ No newline at end of file From 02b4d37c11e8f162c358bd02929de41d9ce7c24c Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 May 2024 20:00:31 -0400 Subject: [PATCH 600/777] elastalert settings --- .../files/predefined/{jira_auth.yaml.jinja => jira_auth.yaml} | 0 .../files/predefined/{smtp_auth.yaml.jinja => smtp_auth.yaml} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename salt/elastalert/files/predefined/{jira_auth.yaml.jinja => jira_auth.yaml} (100%) rename salt/elastalert/files/predefined/{smtp_auth.yaml.jinja => smtp_auth.yaml} (100%) diff --git 
a/salt/elastalert/files/predefined/jira_auth.yaml.jinja b/salt/elastalert/files/predefined/jira_auth.yaml similarity index 100% rename from salt/elastalert/files/predefined/jira_auth.yaml.jinja rename to salt/elastalert/files/predefined/jira_auth.yaml diff --git a/salt/elastalert/files/predefined/smtp_auth.yaml.jinja b/salt/elastalert/files/predefined/smtp_auth.yaml similarity index 100% rename from salt/elastalert/files/predefined/smtp_auth.yaml.jinja rename to salt/elastalert/files/predefined/smtp_auth.yaml From b7a4f20c61be693f893c92c008831192a6fce1d0 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 May 2024 20:11:30 -0400 Subject: [PATCH 601/777] elastalert settings --- salt/elastalert/files/predefined/jira_auth.yaml | 2 +- salt/elastalert/files/predefined/smtp_auth.yaml | 2 +- salt/elastalert/soc_elastalert.yaml | 9 +-------- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/salt/elastalert/files/predefined/jira_auth.yaml b/salt/elastalert/files/predefined/jira_auth.yaml index 8ae240157..9bf0425c0 100644 --- a/salt/elastalert/files/predefined/jira_auth.yaml +++ b/salt/elastalert/files/predefined/jira_auth.yaml @@ -2,5 +2,5 @@ user: {{ elastalert.jira_user }} password: {{ elastalert.jira_pass }} {% else %} -apikey: {{ elastalert.get('jira_key', '') }} +apikey: {{ elastalert.get('jira_api_key', '') }} {% endif %} \ No newline at end of file diff --git a/salt/elastalert/files/predefined/smtp_auth.yaml b/salt/elastalert/files/predefined/smtp_auth.yaml index 0d488dd5c..d09d101f1 100644 --- a/salt/elastalert/files/predefined/smtp_auth.yaml +++ b/salt/elastalert/files/predefined/smtp_auth.yaml @@ -1,2 +1,2 @@ user: {{ elastalert.get('smtp_user', '') }} -password: {{ elastalert.get('smtp_pass', '') }} \ No newline at end of file +password: {{ elastalert.get('smtp_pass', '') }} diff --git a/salt/elastalert/soc_elastalert.yaml b/salt/elastalert/soc_elastalert.yaml index 81df0541f..ab315c482 100644 --- a/salt/elastalert/soc_elastalert.yaml +++ 
b/salt/elastalert/soc_elastalert.yaml @@ -43,20 +43,13 @@ elastalert: global: True helpLink: elastalert.html forcedType: string - opsgenie_key: - title: OpsGenie API Key - description: Optional configuration parameter for OpsGenie API Key. Requires a valid Security Onion license key. - global: True - sensitive: True - helpLink: elastalert.html - forcedType: string files: custom: filename__ext: title: Custom Parameter File description: Optional configuration file that can be used to specify custom file contents, such as a SMTP certificate file. When used, the corresponding parameter must be set to this setting's filename.ext path inside the custom subdirectory. For example, if specifying the SMTP cert file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting for this certificate to be enabled, and assumes this duplicated setting has been named smtp__crt. Note that double underscores will be replaced with a period in the filename. global: True - duplicating: True + duplicates: True file: True helpLink: elastalert.html config: From 03826dd32c02acfdea0a4a94722a5fa853166571 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 21 May 2024 06:43:07 -0400 Subject: [PATCH 602/777] Update README.md with new Detections screenshot number --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a990326a8..530a21813 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Hunt ![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png) Detections -![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/59_detections.png) +![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png) PCAP ![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png) From 
d315b95d77d5e9fed89f03d0bb21ada98d74d8a7 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 21 May 2024 07:15:19 -0400 Subject: [PATCH 603/777] elastalert settings --- salt/elastalert/config.sls | 3 -- salt/elastalert/soc_elastalert.yaml | 51 ++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/salt/elastalert/config.sls b/salt/elastalert/config.sls index c435a212b..25d5bf5f8 100644 --- a/salt/elastalert/config.sls +++ b/salt/elastalert/config.sls @@ -96,10 +96,7 @@ elastacustomsync: - user: 933 - group: 933 - makedirs: True - - template: jinja - file_mode: 660 - - context: - elastalert: {{ ELASTALERTMERGED }} - show_changes: False elastapredefinedsync: diff --git a/salt/elastalert/soc_elastalert.yaml b/salt/elastalert/soc_elastalert.yaml index ab315c482..435c5be6a 100644 --- a/salt/elastalert/soc_elastalert.yaml +++ b/salt/elastalert/soc_elastalert.yaml @@ -45,11 +45,54 @@ elastalert: forcedType: string files: custom: - filename__ext: - title: Custom Parameter File - description: Optional configuration file that can be used to specify custom file contents, such as a SMTP certificate file. When used, the corresponding parameter must be set to this setting's filename.ext path inside the custom subdirectory. For example, if specifying the SMTP cert file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting for this certificate to be enabled, and assumes this duplicated setting has been named smtp__crt. Note that double underscores will be replaced with a period in the filename. + alertmanager_ca__crt: + description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. 
+ global: True + file: True + helpLink: elastalert.html + gelf_ca__crt: + description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/gelf_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + http_post_ca__crt: + description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + http_post2_ca__crt: + description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + ms_teams_ca__crt: + description: Optional custom Certificate Authority for connecting to a Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + pagerduty_ca__crt: + description: Optional custom Certificate Authority for connecting to a PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. 
+ global: True + file: True + helpLink: elastalert.html + rocket_chat_ca__crt: + description: Optional custom Certificate Authority for connecting to a Rocket.Chat server. To utilize this custom file, the rocket_chat_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + smtp__crt: + description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + smtp__key: + description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key. + global: True + file: True + helpLink: elastalert.html + slack_ca__crt: + description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key. 
global: True - duplicates: True file: True helpLink: elastalert.html config: From 8ce19a93b96ff04d35f99b0c403bf6dc1ec0651b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 21 May 2024 13:29:20 -0400 Subject: [PATCH 604/777] exclude false positives related to detections --- salt/common/tools/sbin/so-log-check | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 5bee4d254..cf1691589 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -203,6 +203,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error fi RESULT=0 From d57cc9627f1273401cf25fe515cf9c474100d088 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 21 May 2024 13:31:50 -0400 Subject: [PATCH 605/777] exclude false positives related to detections --- salt/elastalert/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elastalert/defaults.yaml b/salt/elastalert/defaults.yaml index 8021533ab..393932992 100644 --- a/salt/elastalert/defaults.yaml +++ b/salt/elastalert/defaults.yaml @@ -40,4 +40,4 @@ elastalert: level: INFO handlers: - file - propagate: False \ No newline at end of file + propagate: false From deb140e38e2223aa1d727a78cbf2e73d8a8f1701 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 21 May 2024 13:38:52 -0400 Subject: [PATCH 606/777] Exclude detections from template name matching --- .../tools/sbin_jinja/so-elasticsearch-templates-load | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 8f45d6c36..080348522 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -133,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then for i in $pattern; do TEMPLATE=${i::-14} COMPONENT_PATTERN=${TEMPLATE:3} - MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -v osquery) + MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery") if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then load_failures=$((load_failures+1)) echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures" From f4490fab58954a864942425c4f909f55d7679ffc Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 21 May 2024 17:05:39 -0400 Subject: [PATCH 607/777] Add rule.uuid for YARA matches --- salt/elasticsearch/files/ingest/strelka.file | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/strelka.file b/salt/elasticsearch/files/ingest/strelka.file index d55e19350..f66a98857 100644 --- a/salt/elasticsearch/files/ingest/strelka.file +++ b/salt/elasticsearch/files/ingest/strelka.file @@ -56,6 +56,7 @@ { "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }}, { "set": { "if": "ctx.scan?.yara?.matches instanceof List", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }}, { "set": { "if": "ctx.rule?.name != null", "field": "event.dataset", "value": "alert", "override": true }}, + { "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "{{rule.name}}", "override": true }}, { "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }}, { "set": { "if": 
"ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } }, { "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}}, From 3992ef108207b88644198cb88e42860f515bf02d Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 21 May 2024 17:45:56 -0400 Subject: [PATCH 608/777] Add rule.uuid to default groupbys --- salt/soc/defaults.yaml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 0113f22cc..d19f7ed40 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2020,6 +2020,7 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid + - rule.uuid ':sigma:': - soc_timestamp - event.dataset @@ -2033,6 +2034,7 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid + - rule.uuid ':strelka:': - soc_timestamp - event.dataset @@ -2042,6 +2044,7 @@ soc: - file.source - file.mime_type - log.id.fuid + - rule.uuid queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged @@ -2056,17 +2059,17 @@ soc: - acknowledged queries: - name: 'Group By Name, Module' - query: '* | groupby rule.name event.module* event.severity_label' + query: '* | groupby rule.name event.module* event.severity_label rule.uuid' - name: 'Group By Sensor, Source IP/Port, Destination IP/Port, Name' - query: '* | groupby observer.name source.ip source.port destination.ip destination.port rule.name network.community_id event.severity_label' + query: '* | groupby observer.name source.ip source.port destination.ip destination.port rule.name network.community_id event.severity_label rule.uuid' - name: 'Group By Source IP, Name' - query: '* | groupby source.ip rule.name event.severity_label' + query: '* | groupby source.ip rule.name event.severity_label rule.uuid' - name: 'Group By Source Port, Name' - query: '* | groupby source.port rule.name 
event.severity_label' + query: '* | groupby source.port rule.name event.severity_label rule.uuid' - name: 'Group By Destination IP, Name' - query: '* | groupby destination.ip rule.name event.severity_label' + query: '* | groupby destination.ip rule.name event.severity_label rule.uuid' - name: 'Group By Destination Port, Name' - query: '* | groupby destination.port rule.name event.severity_label' + query: '* | groupby destination.port rule.name event.severity_label rule.uuid' - name: Ungroup query: '*' grid: From f9e9b825cffe381115f9aa965f07c084001d866b Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 21 May 2024 17:53:20 -0400 Subject: [PATCH 609/777] Removed unneeded groupby --- salt/soc/defaults.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index d19f7ed40..15f1fd8ce 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2020,7 +2020,6 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid - - rule.uuid ':sigma:': - soc_timestamp - event.dataset @@ -2034,7 +2033,6 @@ soc: - event_data.destination.port - event_data.process.executable - event_data.process.pid - - rule.uuid ':strelka:': - soc_timestamp - event.dataset @@ -2044,7 +2042,6 @@ soc: - file.source - file.mime_type - log.id.fuid - - rule.uuid queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged From 8af3158ea7bd3cdc7dfe22f1c6103db06ebbd7ac Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 21 May 2024 18:28:21 -0400 Subject: [PATCH 610/777] fix elastalert settings --- salt/elastalert/map.jinja | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/salt/elastalert/map.jinja b/salt/elastalert/map.jinja index 8d4e65652..b8ceca277 100644 --- a/salt/elastalert/map.jinja +++ b/salt/elastalert/map.jinja @@ -15,20 +15,16 @@ {% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %} {% if 'ntf' in 
salt['pillar.get']('features', []) %} - {% set params = ELASTALERTMERGED.alerter_parameters | load_yaml %} - {% if params != None %} + {% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %} + {% if params != None and params | length > 0 %} {% do ELASTALERTMERGED.config.update(params) %} {% endif %} - {% if ELASTALERTMERGED.smtp_user | length > 0 %} + {% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %} {% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %} {% endif %} - {% if ELASTALERTMERGED.smtp_user | length > 0 %} - {% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %} - {% endif %} - - {% if ELASTALERTMERGED.jira_user | length > 0 or ELASTALERTMERGED.jira_key | length > 0 %} + {% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_key', '') | length > 0 %} {% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %} {% endif %} From 91f8b1fef7d0bfc8059abd29e3a77103cb08076c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 22 May 2024 13:35:09 -0400 Subject: [PATCH 611/777] Set default replication factor back to Kafka default If replication factor is > 1 Kafka will fail to start until another broker is added - For internal automated testing purposes a Standalone will be utilized Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 86d2f6e94..9a8c05c43 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -8,7 +8,7 @@ kafka: advertised_x_listeners: auto_x_create_x_topics_x_enable: true controller_x_quorum_x_voters: - default_x_replication_x_factor: 2 + default_x_replication_x_factor: 1 inter_x_broker_x_listener_x_name: BROKER 
listeners: BROKER://0.0.0.0:9092 listener_x_security_x_protocol_x_map: BROKER:SSL From b1beb617b3202fed18a7ee4a8a6ea489c53fdd6a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 22 May 2024 13:38:09 -0400 Subject: [PATCH 612/777] Logstash should be disabled when Kafka is enabled except when a minion override exists OR node is a standalone - Standalone subscribes to Kafka topics via logstash for ingest Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/init.sls | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index f7adc1330..3bc539b35 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -7,7 +7,10 @@ {% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} +{# Disable logstash when Kafka is enabled except when the role is standalone #} +{% if LOGSTASH_MERGED.enabled and grains.role == 'so-standalone' %} + - logstash.enabled +{% elif LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} - logstash.enabled {% else %} - logstash.disabled From 382cd24a57b3394688dbe93f1f9b5d1c6e3998ed Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 22 May 2024 13:39:21 -0400 Subject: [PATCH 613/777] Small changes needed for using new Kafka docker image + added Kafka logging output to /opt/so/log/kafka/ Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/enabled.sls | 7 ++++--- salt/kafka/storage.sls | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index b01e6f2a8..833cc7f3c 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -26,7 +26,7 @@ so-kafka: - user: kafka - environment: KAFKA_HEAP_OPTS: -Xmx2G -Xms1G - KAFKA_OPTS: -javaagent:/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ 
DOCKER.containers['so-kafka'].ip }},policyLocation=file:/jolokia/jolokia.xml + KAFKA_OPTS: -javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml - extra_hosts: {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} @@ -44,9 +44,10 @@ so-kafka: - /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts:ro - /nsm/kafka/data/:/nsm/kafka/data/:rw - - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties:ro + - /opt/so/log/kafka:/opt/kafka/logs/:rw + - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro {% if GLOBALS.is_manager %} - - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties + - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties {% endif %} - watch: {% for sc in ['server', 'client'] %} diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index 507c199c6..efc36acf6 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -13,7 +13,7 @@ kafka_storage_init: cmd.run: - name: | - docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /opt/kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /opt/kafka/config/kraft/newserver.properties kafka_rm_kafkainit: cmd.run: - name: | From 
3cfd71075602fa04b104f004a22cbf151258f970 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 22 May 2024 13:41:32 -0400 Subject: [PATCH 614/777] Change tab casing to be consistent with other whiptail prompts --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 06d62a027..1dab63237 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -475,7 +475,7 @@ whiptail_end_settings() { read -r -d '' msg <<-EOM $end_msg - Press the Tab key to select yes or no. + Press the TAB key to select yes or no. EOM whiptail --title "The following options have been set, would you like to proceed?" --yesno "$msg" 24 75 --scrolltext From d19c1a514bee994b03a3f3f54df531bbcabcbd7c Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 22 May 2024 15:12:23 -0400 Subject: [PATCH 615/777] Detections backup script --- salt/soc/config.sls | 20 ++++ salt/soc/files/soc/so-detections-backup.py | 111 +++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 salt/soc/files/soc/so-detections-backup.py diff --git a/salt/soc/config.sls b/salt/soc/config.sls index a85032295..65d6bd2fa 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -80,6 +80,15 @@ socmotd: - mode: 600 - template: jinja +filedetectionsbackup: + file.managed: + - name: /opt/so/conf/soc/so-detections-backup.py + - source: salt://soc/files/soc/so-detections-backup.py + - user: 939 + - group: 939 + - mode: 600 + - show_changes: False + crondetectionsruntime: cron.present: - name: /usr/sbin/so-detections-runtime-status cron @@ -91,6 +100,17 @@ crondetectionsruntime: - month: '*' - dayweek: '*' +crondetectionsbackup: + cron.present: + - name: python3 /opt/so/conf/soc/so-detections-backup.py + - identifier: detections-backup + - user: root + - minute: '0' + - hour: '0' + - daymonth: '*' + - month: '*' + - dayweek: '*' + socsigmafinalpipeline: file.managed: - name: /opt/so/conf/soc/sigma_final_pipeline.yaml diff --git 
a/salt/soc/files/soc/so-detections-backup.py b/salt/soc/files/soc/so-detections-backup.py new file mode 100644 index 000000000..b7e6e2491 --- /dev/null +++ b/salt/soc/files/soc/so-detections-backup.py @@ -0,0 +1,111 @@ +# Copyright 2020-2023 Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# This script queries Elasticsearch for Custom Detections and all Overrides, +# and git commits them to disk at $OUTPUT_DIR + +import os +import subprocess +import json +import requests +from requests.auth import HTTPBasicAuth +import urllib3 +from datetime import datetime + +# Suppress SSL warnings +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +# Constants +ES_URL = "https://localhost:9200/so-detection/_search" +QUERY_DETECTIONS = '{"query": {"bool": {"must": [{"match_all": {}}, {"term": {"so_detection.ruleset": "__custom__"}}]}},"size": 10000}' +QUERY_OVERRIDES = '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}},"size": 10000}' +OUTPUT_DIR = "/nsm/backup/detections/repo" +AUTH_FILE = "/opt/so/conf/elasticsearch/curl.config" + +def get_auth_credentials(auth_file): + with open(auth_file, 'r') as file: + for line in file: + if line.startswith('user ='): + return line.split('=', 1)[1].strip().replace('"', '') + +def query_elasticsearch(query, auth): + headers = {"Content-Type": "application/json"} + response = requests.get(ES_URL, headers=headers, data=query, auth=auth, verify=False) + response.raise_for_status() + return response.json() + +def save_content(hit, base_folder, subfolder="", extension="txt"): + so_detection = hit["_source"]["so_detection"] + public_id = so_detection["publicId"] + content = so_detection["content"] + file_dir = os.path.join(base_folder, subfolder) + 
os.makedirs(file_dir, exist_ok=True) + file_path = os.path.join(file_dir, f"{public_id}.{extension}") + with open(file_path, "w") as f: + f.write(content) + return file_path + +def save_overrides(hit): + so_detection = hit["_source"]["so_detection"] + public_id = so_detection["publicId"] + overrides = so_detection["overrides"] + language = so_detection["language"] + folder = os.path.join(OUTPUT_DIR, language, "overrides") + os.makedirs(folder, exist_ok=True) + extension = "yaml" if language == "sigma" else "txt" + file_path = os.path.join(folder, f"{public_id}.{extension}") + with open(file_path, "w") as f: + f.write('\n'.join(json.dumps(override) for override in overrides) if isinstance(overrides, list) else overrides) + return file_path + +def ensure_git_repo(): + if not os.path.isdir(os.path.join(OUTPUT_DIR, '.git')): + subprocess.run(["git", "config", "--global", "init.defaultBranch", "main"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "init"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "remote", "add", "origin", "default"], check=True) + +def commit_changes(): + ensure_git_repo() + subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.email", "securityonion@local.invalid"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.name", "securityonion"], check=True) + subprocess.run(["git", "-C", OUTPUT_DIR, "add", "."], check=True) + status_result = subprocess.run(["git", "-C", OUTPUT_DIR, "status"], capture_output=True, text=True) + print(status_result.stdout) + commit_result = subprocess.run(["git", "-C", OUTPUT_DIR, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True) + if commit_result.returncode == 1: + print("No changes to commit.") + elif commit_result.returncode == 0: + print("Changes committed successfully.") + else: + commit_result.check_returncode() + +def main(): + try: + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"Backing up Custom Detections and all 
Overrides to {OUTPUT_DIR} - {timestamp}\n") + + auth_credentials = get_auth_credentials(AUTH_FILE) + username, password = auth_credentials.split(':', 1) + auth = HTTPBasicAuth(username, password) + + # Query and save custom detections + detections = query_elasticsearch(QUERY_DETECTIONS, auth)["hits"]["hits"] + for hit in detections: + save_content(hit, OUTPUT_DIR, hit["_source"]["so_detection"]["language"], "yaml" if hit["_source"]["so_detection"]["language"] == "sigma" else "txt") + + # Query and save overrides + overrides = query_elasticsearch(QUERY_OVERRIDES, auth)["hits"]["hits"] + for hit in overrides: + save_overrides(hit) + + commit_changes() + + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"Backup Completed - {timestamp}") + except Exception as e: + print(f"An error occurred: {e}") + +if __name__ == "__main__": + main() \ No newline at end of file From a072e34cfeaaabdb24090f6c99eb2bf4b8057b88 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 22 May 2024 17:12:41 -0400 Subject: [PATCH 616/777] Fix casing issue --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 78f541982..60de637b4 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -200,7 +200,7 @@ soc: global: True advanced: True helpLink: yara.html - autoEnabledYARARules: + autoEnabledYaraRules: description: 'YARA rules to automatically enable on initial import. 
Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' global: True advanced: True From 3d4f3a04a3c855e758f00ecbfa0fd9376607f8aa Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 23 May 2024 05:56:18 -0400 Subject: [PATCH 617/777] Update defaults.yaml to fix order of groupby tables and eliminate duplicate --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 15f1fd8ce..6ecf1f183 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1817,7 +1817,7 @@ soc: query: 'tags:dhcp | groupby host.hostname | groupby -sankey host.hostname client.address | groupby client.address | groupby -sankey client.address server.address | groupby server.address | groupby dhcp.message_types | groupby host.domain' - name: DNS description: DNS (Domain Name System) queries - query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.response.code_name | groupby dns.answers.name | groupby dns.query.type_name | groupby dns.response.code_name | groupby destination_geo.organization_name' + query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination_geo.organization_name' - name: DPD description: DPD (Dynamic Protocol Detection) errors query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination_geo.organization_name' From 
8e7c487cb0e0e13a28ea71896dd2b52fb2ab1ac6 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 23 May 2024 05:59:31 -0400 Subject: [PATCH 618/777] Fix strelka rule.uuid --- salt/elasticsearch/files/ingest/strelka.file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/strelka.file b/salt/elasticsearch/files/ingest/strelka.file index f66a98857..d9aed3b29 100644 --- a/salt/elasticsearch/files/ingest/strelka.file +++ b/salt/elasticsearch/files/ingest/strelka.file @@ -56,7 +56,7 @@ { "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }}, { "set": { "if": "ctx.scan?.yara?.matches instanceof List", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }}, { "set": { "if": "ctx.rule?.name != null", "field": "event.dataset", "value": "alert", "override": true }}, - { "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "rule.name", "override": true }}, + { "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "{{rule.name}}", "override": true }}, { "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }}, { "set": { "if": "ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } }, { "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}}, From 1e6161f89c82277a409d0051f82a0fa5a79582d9 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 23 May 2024 08:19:43 -0400 Subject: [PATCH 619/777] Update defaults.yaml --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 6ecf1f183..86170b4ce 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1482,7 +1482,7 @@ soc: showSubtitle: true - name: Elastalerts description: '' - query: '_type:elastalert | groupby rule.name' + 
query: 'event.dataset:sigma.alert | groupby rule.name' showSubtitle: true - name: Alerts description: Show all alerts grouped by alert source From 0b9ebefdb670e9ad85bd4bb415291e82ef71c505 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 23 May 2024 10:08:23 -0400 Subject: [PATCH 620/777] only show telem status in final whiptail if new deployment --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 1dab63237..4fab6dbe4 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -451,7 +451,7 @@ whiptail_end_settings() { done fi - if [[ ! $is_airgap ]]; then + if [[ ! $is_airgap ]] && [[ $dist_option == "NEWDEPLOYMENT" ]]; then if [[ $telemetry -eq 0 ]]; then __append_end_msg "SOC Telemetry: enabled" else From ea7715f7297c426956b7074193ca331f0aaa0b18 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 23 May 2024 10:41:10 -0400 Subject: [PATCH 621/777] use waitforstate var instead. --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 4fab6dbe4..d950f2921 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -451,7 +451,7 @@ whiptail_end_settings() { done fi - if [[ ! $is_airgap ]] && [[ $dist_option == "NEWDEPLOYMENT" ]]; then + if [[ ! 
$is_airgap ]] && [[ $waitforstate ]]; then if [[ $telemetry -eq 0 ]]; then __append_end_msg "SOC Telemetry: enabled" else From b5f656ae58d2ea2580238211f278262d82e65a3f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 23 May 2024 13:22:22 -0400 Subject: [PATCH 622/777] dont render pillar each time so-tcpreplay runs --- salt/common/tools/{sbin => sbin_jinja}/so-tcpreplay | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename salt/common/tools/{sbin => sbin_jinja}/so-tcpreplay (92%) diff --git a/salt/common/tools/sbin/so-tcpreplay b/salt/common/tools/sbin_jinja/so-tcpreplay similarity index 92% rename from salt/common/tools/sbin/so-tcpreplay rename to salt/common/tools/sbin_jinja/so-tcpreplay index 99314c289..6f3f02983 100755 --- a/salt/common/tools/sbin/so-tcpreplay +++ b/salt/common/tools/sbin_jinja/so-tcpreplay @@ -10,7 +10,7 @@ . /usr/sbin/so-common . /usr/sbin/so-image-common -REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)} +REPLAYIFACE=${REPLAYIFACE:-"{{pillar.sensor.interface}}"} REPLAYSPEED=${REPLAYSPEED:-10} mkdir -p /opt/so/samples @@ -57,8 +57,8 @@ if ! docker ps | grep -q so-tcpreplay; then fi if is_sensor_node; then - echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..." - docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@" + echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface $REPLAYIFACE..." + docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i $REPLAYIFACE -M${REPLAYSPEED} $@" echo "Replay completed. Warnings shown above are typically expected." 
elif is_manager_node; then From 15155613c3d60a2e48a7e0c922ae09eb5d225acb Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 24 May 2024 08:23:45 -0400 Subject: [PATCH 623/777] provide default columns when viewing SOC logs --- salt/soc/defaults.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 86170b4ce..39960d946 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1271,6 +1271,14 @@ soc: - netflow.type - netflow.exporter.version - observer.ip + ':soc:': + - soc_timestamp + - source.ip + - soc.fields.requestMethod + - soc.fields.requestPath + - soc.fields.statusCode + - event.action + - soc.fields.error server: bindAddress: 0.0.0.0:9822 baseUrl: / From bd11d59c15fb711332ab0c67423e217f2b4b1b70 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 24 May 2024 08:38:12 -0400 Subject: [PATCH 624/777] add event.dataset since there are other datasets in soc logs --- salt/soc/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 39960d946..9f5faf50b 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1273,6 +1273,7 @@ soc: - observer.ip ':soc:': - soc_timestamp + - event.dataset - source.ip - soc.fields.requestMethod - soc.fields.requestPath From 66725b11b304d1742caad49854bd968f71196c97 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 24 May 2024 09:55:10 -0400 Subject: [PATCH 625/777] Added unit tests --- salt/soc/config.sls | 3 +- salt/soc/files/soc/so-detections-backup.py | 2 + .../files/soc/so-detections-backup_test.py | 159 ++++++++++++++++++ 3 files changed, 162 insertions(+), 2 deletions(-) create mode 100644 salt/soc/files/soc/so-detections-backup_test.py diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 65d6bd2fa..8d1f0f694 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -87,7 +87,6 @@ filedetectionsbackup: - user: 939 - group: 939 - mode: 600 - - show_changes: False 
crondetectionsruntime: cron.present: @@ -102,7 +101,7 @@ crondetectionsruntime: crondetectionsbackup: cron.present: - - name: python3 /opt/so/conf/soc/so-detections-backup.py + - name: python3 /opt/so/conf/soc/so-detections-backup.py &>> /opt/so/log/soc/detections-backup.log - identifier: detections-backup - user: root - minute: '0' diff --git a/salt/soc/files/soc/so-detections-backup.py b/salt/soc/files/soc/so-detections-backup.py index b7e6e2491..085b1e4c7 100644 --- a/salt/soc/files/soc/so-detections-backup.py +++ b/salt/soc/files/soc/so-detections-backup.py @@ -86,6 +86,8 @@ def main(): timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") print(f"Backing up Custom Detections and all Overrides to {OUTPUT_DIR} - {timestamp}\n") + os.makedirs(OUTPUT_DIR, exist_ok=True) + auth_credentials = get_auth_credentials(AUTH_FILE) username, password = auth_credentials.split(':', 1) auth = HTTPBasicAuth(username, password) diff --git a/salt/soc/files/soc/so-detections-backup_test.py b/salt/soc/files/soc/so-detections-backup_test.py new file mode 100644 index 000000000..3afa11886 --- /dev/null +++ b/salt/soc/files/soc/so-detections-backup_test.py @@ -0,0 +1,159 @@ +# Copyright 2020-2023 Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +import unittest +from unittest.mock import patch, MagicMock, mock_open, call +import requests +import os +import subprocess +import json +from datetime import datetime +import importlib + +ds = importlib.import_module('so-detections-backup') + +class TestBackupScript(unittest.TestCase): + + def setUp(self): + self.output_dir = '/nsm/backup/detections/repo' + self.auth_file_path = '/nsm/backup/detections/repo' + self.mock_auth_data = 'user = "so_elastic:@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS"' + self.auth_credentials = 'so_elastic:@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS' + self.auth = requests.auth.HTTPBasicAuth('so_elastic', '@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS') + self.mock_detection_hit = { + "_source": { + "so_detection": { + "publicId": "test_id", + "content": "test_content", + "language": "suricata" + } + } + } + self.mock_override_hit = { + "_source": { + "so_detection": { + "publicId": "test_id", + "overrides": [{"key": "value"}], + "language": "sigma" + } + } + } + + def assert_file_written(self, mock_file, expected_path, expected_content): + mock_file.assert_called_once_with(expected_path, 'w') + mock_file().write.assert_called_once_with(expected_content) + + @patch('builtins.open', new_callable=mock_open, read_data='user = "so_elastic:@Tu_dv_[7SvK7[-JZN39BBlSa;WAyf8rCY+3w~Sntp=7oR9*~34?Csi)a@v?)K*vK4vQAywS"') + def test_get_auth_credentials(self, mock_file): + credentials = ds.get_auth_credentials(self.auth_file_path) + self.assertEqual(credentials, self.auth_credentials) + mock_file.assert_called_once_with(self.auth_file_path, 'r') + + @patch('requests.get') + def test_query_elasticsearch(self, mock_get): + mock_response = MagicMock() + mock_response.json.return_value = {'hits': {'hits': []}} + mock_response.raise_for_status = MagicMock() + mock_get.return_value = mock_response + + response = ds.query_elasticsearch(ds.QUERY_DETECTIONS, self.auth) + 
+ self.assertEqual(response, {'hits': {'hits': []}}) + mock_get.assert_called_once_with( + ds.ES_URL, + headers={"Content-Type": "application/json"}, + data=ds.QUERY_DETECTIONS, + auth=self.auth, + verify=False + ) + + @patch('os.makedirs') + @patch('builtins.open', new_callable=mock_open) + def test_save_content(self, mock_file, mock_makedirs): + file_path = ds.save_content(self.mock_detection_hit, self.output_dir, 'subfolder', 'txt') + expected_path = f'{self.output_dir}/subfolder/test_id.txt' + self.assertEqual(file_path, expected_path) + mock_makedirs.assert_called_once_with(f'{self.output_dir}/subfolder', exist_ok=True) + self.assert_file_written(mock_file, expected_path, 'test_content') + + @patch('os.makedirs') + @patch('builtins.open', new_callable=mock_open) + def test_save_overrides(self, mock_file, mock_makedirs): + file_path = ds.save_overrides(self.mock_override_hit) + expected_path = f'{self.output_dir}/sigma/overrides/test_id.yaml' + self.assertEqual(file_path, expected_path) + mock_makedirs.assert_called_once_with(f'{self.output_dir}/sigma/overrides', exist_ok=True) + self.assert_file_written(mock_file, expected_path, json.dumps({"key": "value"})) + + @patch('subprocess.run') + def test_ensure_git_repo(self, mock_run): + mock_run.return_value = MagicMock(returncode=0) + + ds.ensure_git_repo() + + mock_run.assert_has_calls([ + call(["git", "config", "--global", "init.defaultBranch", "main"], check=True), + call(["git", "-C", self.output_dir, "init"], check=True), + call(["git", "-C", self.output_dir, "remote", "add", "origin", "default"], check=True) + ]) + + @patch('subprocess.run') + def test_commit_changes(self, mock_run): + mock_status_result = MagicMock() + mock_status_result.stdout = "On branch main\nnothing to commit, working tree clean" + mock_commit_result = MagicMock(returncode=1) + # Ensure sufficient number of MagicMock instances for each subprocess.run call + mock_run.side_effect = [mock_status_result, mock_commit_result, 
MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0)] + + print("Running test_commit_changes...") + ds.commit_changes() + print("Finished test_commit_changes.") + + mock_run.assert_has_calls([ + call(["git", "-C", self.output_dir, "config", "user.email", "securityonion@local.invalid"], check=True), + call(["git", "-C", self.output_dir, "config", "user.name", "securityonion"], check=True), + call(["git", "-C", self.output_dir, "add", "."], check=True), + call(["git", "-C", self.output_dir, "status"], capture_output=True, text=True), + call(["git", "-C", self.output_dir, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True) + ]) + + @patch('builtins.print') + @patch('so-detections-backup.commit_changes') + @patch('so-detections-backup.save_overrides') + @patch('so-detections-backup.save_content') + @patch('so-detections-backup.query_elasticsearch') + @patch('so-detections-backup.get_auth_credentials') + @patch('os.makedirs') + def test_main(self, mock_makedirs, mock_get_auth, mock_query, mock_save_content, mock_save_overrides, mock_commit, mock_print): + mock_get_auth.return_value = self.auth_credentials + mock_query.side_effect = [ + {'hits': {'hits': [{"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}]}}, + {'hits': {'hits': [{"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}}]}} + ] + + with patch('datetime.datetime') as mock_datetime: + mock_datetime.now.return_value.strftime.return_value = "2024-05-23 20:49:44" + ds.main() + + mock_makedirs.assert_called_once_with(self.output_dir, exist_ok=True) + mock_get_auth.assert_called_once_with(ds.AUTH_FILE) + mock_query.assert_has_calls([ + call(ds.QUERY_DETECTIONS, self.auth), + call(ds.QUERY_OVERRIDES, self.auth) + ]) + mock_save_content.assert_called_once_with( + {"_source": {"so_detection": 
{"publicId": "1", "content": "content1", "language": "sigma"}}}, + self.output_dir, + "sigma", + "yaml" + ) + mock_save_overrides.assert_called_once_with( + {"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}} + ) + mock_commit.assert_called_once() + mock_print.assert_called() + +if __name__ == '__main__': + unittest.main(verbosity=2) From 4344988abeafe731e26f0305cdd62af1034c4e59 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 24 May 2024 12:54:36 -0400 Subject: [PATCH 626/777] Add instructions for sigma and yara repos --- salt/soc/soc_soc.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 60de637b4..415829460 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -119,14 +119,14 @@ soc: advanced: True rulesRepos: default: &eerulesRepos - description: "Custom Git repos to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." + description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update." global: True advanced: True forcedType: "[]{}" helpLink: sigma.html airgap: *eerulesRepos sigmaRulePackages: - description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). 
Once you have changed the ruleset here, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' + description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, the new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True advanced: False helpLink: sigma.html @@ -221,7 +221,7 @@ soc: advanced: True rulesRepos: default: &serulesRepos - description: "Custom Git repos to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." + description: "Custom Git repositories to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. 
At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Strelka --> Full Update."" global: True advanced: True forcedType: "[]{}" From f90d40b4714073c640bccfa05e36755f6a31c6af Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 24 May 2024 12:56:17 -0400 Subject: [PATCH 627/777] Fix typo --- salt/soc/soc_soc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 415829460..1f64eb0bc 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -221,7 +221,7 @@ soc: advanced: True rulesRepos: default: &serulesRepos - description: "Custom Git repositories to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Strelka --> Full Update."" + description: "Custom Git repositories to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Strelka --> Full Update." 
global: True advanced: True forcedType: "[]{}" From 550b3ee92d02d4d6ce6bb11610532b68fb02089c Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 24 May 2024 14:46:24 -0400 Subject: [PATCH 628/777] Add IDH mappings --- salt/soc/files/soc/sigma_so_pipeline.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/salt/soc/files/soc/sigma_so_pipeline.yaml b/salt/soc/files/soc/sigma_so_pipeline.yaml index 312d07965..8314361f5 100644 --- a/salt/soc/files/soc/sigma_so_pipeline.yaml +++ b/salt/soc/files/soc/sigma_so_pipeline.yaml @@ -17,6 +17,16 @@ transformations: dst_ip: destination.ip.keyword dst_port: destination.port winlog.event_data.User: user.name + logtype: event.code # OpenCanary + # Maps "opencanary" product to SO IDH logs + - id: opencanary_idh_add-fields + type: add_condition + conditions: + event.module: 'opencanary' + event.dataset: 'opencanary.idh' + rule_conditions: + - type: logsource + product: opencanary # Maps "antivirus" category to Windows Defender logs shipped by Elastic Agent Winlog Integration # winlog.event_data.threat_name has to be renamed prior to ingestion, it is originally winlog.event_data.Threat Name - id: antivirus_field-mappings_windows-defender @@ -88,3 +98,11 @@ transformations: - type: logsource product: linux service: auth + # event.code should always be a string + - id: convert_event_code_to_string + type: convert_type + target_type: 'str' + field_name_conditions: + - type: include_fields + fields: + - event.code From 58b565558df8acf341d2419fb27fb92e8725056e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 24 May 2024 16:21:59 -0400 Subject: [PATCH 629/777] Dont bail - just wait for enter --- salt/manager/tools/sbin/soup | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 525fce3f6..b6bf61d2a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -680,16 +680,15 @@ playbook_migration() { 
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l) if [[ "$active_rules_count" -gt 0 ]]; then - # Prompt the user to AGREE if active Elastalert rules found + # Prompt the user to press ENTER if active Elastalert rules found echo echo "$active_rules_count Active Elastalert/Playbook rules found." echo "In preparation for the new Detections module, they will be backed up and then disabled." echo - echo "If you would like to proceed, then type AGREE and press ENTER." + echo "Press ENTER to proceed." echo # Read user input - read INPUT - if [ "${INPUT^^}" != 'AGREE' ]; then fail "SOUP canceled."; fi + read -r echo "Backing up the Elastalert rules..." rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/ From fcb6a47e8c39a4d7a0bd3d3f75ffd4357154c6cf Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Sun, 26 May 2024 21:10:41 -0400 Subject: [PATCH 630/777] Remove redis.sh telegraf script when Kafka is global pipeline Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/telegraf/map.jinja | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/telegraf/map.jinja b/salt/telegraf/map.jinja index b56c8a64d..1d92b8b5b 100644 --- a/salt/telegraf/map.jinja +++ b/salt/telegraf/map.jinja @@ -22,3 +22,10 @@ {% endif %} {% endif %} + +{% if GLOBALS.pipeline != 'REDIS' %} +{# When global pipeline is not REDIS remove redis.sh script. KAFKA metrics are collected via jolokia agent. 
Config in telegraf.conf #} +{% if GLOBALS.role in ['so-standalone', 'so-manager', 'so-managersearch', 'so-receiver', 'so-heavynode'] %} +{% do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('redis.sh') %} +{% endif %} +{% endif %} From 81ee60e6586f2fac5e47c96c40fa2ca32245f6c6 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 28 May 2024 06:42:18 -0400 Subject: [PATCH 631/777] Backup .yml files too --- salt/manager/tools/sbin/soup | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index b6bf61d2a..c37138e19 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -677,7 +677,7 @@ playbook_migration() { if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then # Check for active Elastalert rules - active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l) + active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l) if [[ "$active_rules_count" -gt 0 ]]; then # Prompt the user to press ENTER if active Elastalert rules found @@ -691,7 +691,8 @@ playbook_migration() { read -r echo "Backing up the Elastalert rules..." - rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/ + rsync -av --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/ + fi # Verify that rsync completed successfully if [[ $? 
-eq 0 ]]; then From 74dfc25376d845d042cfd1c00b57541ec40cffcc Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 28 May 2024 09:29:10 -0400 Subject: [PATCH 632/777] backup local rules --- salt/manager/tools/sbin/soup | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c37138e19..9b61f3c8c 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -660,6 +660,11 @@ suricata_idstools_migration() { fail "Error: rsync failed to copy the files. Thresholds have not been backed up." fi + #Backup local rules + mkdir -p /nsm/backup/detections-migration/suricata/local-rules + rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules + rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules + #Tell SOC to migrate mkdir -p /opt/so/conf/soc/migrations echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70 From 2a2b86ebe62b2690b4c2b0e51d0124f60c976b24 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 28 May 2024 09:43:45 -0400 Subject: [PATCH 633/777] Dont overwrite --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 9b61f3c8c..7d94fcf55 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -663,7 +663,7 @@ suricata_idstools_migration() { #Backup local rules mkdir -p /nsm/backup/detections-migration/suricata/local-rules rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules - rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules + rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak #Tell SOC to migrate mkdir -p 
/opt/so/conf/soc/migrations From f68ac23f0eb69334e78fb0c8c45ddd71f6a7f8d4 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 28 May 2024 10:03:31 -0400 Subject: [PATCH 634/777] Fix fi Signed-off-by: DefensiveDepth --- salt/manager/tools/sbin/soup | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 7d94fcf55..39c684c08 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -697,7 +697,6 @@ playbook_migration() { echo "Backing up the Elastalert rules..." rsync -av --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/ - fi # Verify that rsync completed successfully if [[ $? -eq 0 ]]; then From ee4ca0d7a214ee764f841d0d90e9119ced80fe07 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 28 May 2024 10:24:09 -0400 Subject: [PATCH 635/777] Check to see if local exists --- salt/manager/tools/sbin/soup | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 39c684c08..b8297ad44 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -663,7 +663,9 @@ suricata_idstools_migration() { #Backup local rules mkdir -p /nsm/backup/detections-migration/suricata/local-rules rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules - rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak + if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then + rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak + fi #Tell SOC to migrate mkdir -p /opt/so/conf/soc/migrations From 15a0b959aae7a9a2381e9b2aa66b5240230b3499 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 10:51:39 -0400 
Subject: [PATCH 636/777] Add jolokia metrics for influxdb dashboard Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/telegraf/etc/telegraf.conf | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 42a8d43bf..1e8d9d3fe 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -259,6 +259,23 @@ field_prefix = "$1." tag_keys = ["topic"] +[[inputs.jolokia2_agent.metric]] + name = "controller" + mbean = "kafka.controller:name=*,type=*" + field_prefix = "$1." + +[[inputs.jolokia2_agent.metric]] + name = "partition" + mbean = "kafka.log:name=*,partition=*,topic=*,type=Log" + field_name = "$1" + tag_keys = ["topic", "partition"] + +[[inputs.jolokia2_agent.metric]] + name = "partition" + mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition" + field_name = "UnderReplicatedPartitions" + tag_keys = ["topic", "partition"] + {%- endif %} # # Read metrics from one or more commands that can output to stdout {%- if 'sostatus.sh' in TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]] %} From 47efcfd6e2ac4bf828755893b6f42343cd8297a6 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 10:55:11 -0400 Subject: [PATCH 637/777] Add basic Kafka metrics to 'Security Onion Performance' influxdb dashboard Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../templates/dashboard-security_onion_performance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/templates/dashboard-security_onion_performance.json b/salt/influxdb/templates/dashboard-security_onion_performance.json index e4f2a6d38..05eb9de99 100644 --- a/salt/influxdb/templates/dashboard-security_onion_performance.json +++ b/salt/influxdb/templates/dashboard-security_onion_performance.json @@ -1 +1 @@ 
-[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD 
HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" 
days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: 
\"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => 
r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: 
[\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: 
[\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> 
derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == 
v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":38},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" 
then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n 
tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> 
filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => 
r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host 
!= \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> 
hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: 
v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == 
\"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: 
v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do 
Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: 
v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n 
|> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != 
\"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> 
roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n 
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName"
:"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Mos
t Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":38},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance"}}] \ No newline at end of file +[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical 
Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD 
HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" 
days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: 
\"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\r\n |> derivative(unit: 1s, nonNegative: true)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty 
Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, 
createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> 
hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with 
\"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: 
[\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> 
derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == 
v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" 
then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active 
Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => 
r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host 
!= \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" 
Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n 
|> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n 
\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> 
filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> 
group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n 
|> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: 
(r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) 
=> r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName"
:"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Mos
t Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file From 1c1a1a1d3fc0138b3df81c19d61e80a9f4f62ec3 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 11:14:19 -0400 Subject: [PATCH 638/777] Remove unneeded jolokia aggregate metrics to reduce data ingested to influx Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/telegraf/etc/telegraf.conf | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 1e8d9d3fe..ecfb0730a 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -247,6 +247,25 @@ [[inputs.jolokia2_agent]] name_prefix= "kafka_" urls = ["http://localhost:8778/jolokia"] + fieldexclude = [ + "*.EventType", + "*.FifteenMinuteRate", + "*.FiveMinuteRate", + "*.MeanRate", + "*.OneMinuteRate", + "*.RateUnit", + "*.LatencyUnit", + "*.50thPercentile", + "*.75thPercentile", + "*.95thPercentile", + "*.98thPercentile", + "*.99thPercentile", + "*.999thPercentile", + "*.Min", + "*.Mean", + 
"*.Max", + "*.StdDev" + ] [[inputs.jolokia2_agent.metric]] name = "topics" From 0d7c331ff0e99e5148a7e6c589eaa386ce0e05d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 11:29:38 -0400 Subject: [PATCH 639/777] only show specific fields when hovering over Kafka influxdb panels Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../templates/dashboard-security_onion_performance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/templates/dashboard-security_onion_performance.json b/salt/influxdb/templates/dashboard-security_onion_performance.json index 05eb9de99..831f8eb16 100644 --- a/salt/influxdb/templates/dashboard-security_onion_performance.json +++ b/salt/influxdb/templates/dashboard-security_onion_performance.json @@ -1 +1 @@ -[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD 
HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" 
days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: 
\"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\r\n |> derivative(unit: 1s, nonNegative: true)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty 
Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, 
createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> 
hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with 
\"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: 
[\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> 
derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == 
v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" 
then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active 
Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => 
r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host 
!= \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" 
Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n 
|> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n 
\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> 
filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> 
group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n 
|> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: 
(r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) 
=> r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName"
:"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Mos
t Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file +[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical 
Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD 
HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" 
days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: 
\"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n 
else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = 
from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: 
[\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> 
derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == 
v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" 
then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> 
hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> 
sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n 
else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" 
Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n 
|> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n 
\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> 
filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> 
group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n 
|> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: 
(r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) 
=> r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName"
:"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Mos
t Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file From 77b5aa4369ae46240f069ef52fdb75714def9322 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 11:34:35 -0400 Subject: [PATCH 640/777] Correct dashboard name Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../templates/dashboard-security_onion_performance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/templates/dashboard-security_onion_performance.json b/salt/influxdb/templates/dashboard-security_onion_performance.json index 831f8eb16..4f543c8d1 100644 --- a/salt/influxdb/templates/dashboard-security_onion_performance.json +++ b/salt/influxdb/templates/dashboard-security_onion_performance.json @@ -1 +1 @@ -[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n 
if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> 
filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, 
createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) 
=>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => 
r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: 
[\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> 
derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == 
v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" 
then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> 
hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> 
sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n 
else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" 
Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n 
|> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n 
\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> 
filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> 
group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n 
|> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: 
(r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) 
=> r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName"
:"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Mos
t Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file +[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical 
Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD 
HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" 
days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: 
\"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n 
else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = 
from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: 
[\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> 
derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == 
v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" 
then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> 
hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> 
sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n 
else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" 
Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n 
|> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n 
\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> 
filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> 
group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n 
|> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: 
(r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r)
=> r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n 
else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName"
:"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Mos
t Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance"}}] \ No newline at end of file From 59097070efc3391469418ff25beb77c04863e33e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 12:17:43 -0400 Subject: [PATCH 641/777] Revert "Remove unneeded jolokia aggregate metrics to reduce data ingested to influx" This reverts commit 1c1a1a1d3fc0138b3df81c19d61e80a9f4f62ec3. 
--- salt/telegraf/etc/telegraf.conf | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index ecfb0730a..1e8d9d3fe 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -247,25 +247,6 @@ [[inputs.jolokia2_agent]] name_prefix= "kafka_" urls = ["http://localhost:8778/jolokia"] - fieldexclude = [ - "*.EventType", - "*.FifteenMinuteRate", - "*.FiveMinuteRate", - "*.MeanRate", - "*.OneMinuteRate", - "*.RateUnit", - "*.LatencyUnit", - "*.50thPercentile", - "*.75thPercentile", - "*.95thPercentile", - "*.98thPercentile", - "*.99thPercentile", - "*.999thPercentile", - "*.Min", - "*.Mean", - "*.Max", - "*.StdDev" - ] [[inputs.jolokia2_agent.metric]] name = "topics" From 0d034e7adcacc414cce6531f95e94da65ee9f56e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 29 May 2024 10:55:56 -0400 Subject: [PATCH 642/777] fix rsync --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index b8297ad44..1e9585987 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -698,7 +698,7 @@ playbook_migration() { read -r echo "Backing up the Elastalert rules..." - rsync -av --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/ + rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/ # Verify that rsync completed successfully if [[ $? 
-eq 0 ]]; then From e98b8566c942a4557b9ab33ced61524f4d2e725c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 29 May 2024 14:50:22 -0400 Subject: [PATCH 643/777] 2.4.70 --- DOWNLOAD_AND_VERIFY_ISO.md | 22 ++++++++++----------- sigs/securityonion-2.4.70-20240529.iso.sig | Bin 0 -> 566 bytes 2 files changed, 11 insertions(+), 11 deletions(-) create mode 100644 sigs/securityonion-2.4.70-20240529.iso.sig diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index 4493f210d..fcefce469 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,17 +1,17 @@ -### 2.4.60-20240320 ISO image released on 2024/03/20 +### 2.4.70-20240529 ISO image released on 2024/03/20 ### Download and Verify -2.4.60-20240320 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso +2.4.70-20240529 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso -MD5: 178DD42D06B2F32F3870E0C27219821E -SHA1: 73EDCD50817A7F6003FE405CF1808A30D034F89D -SHA256: DD334B8D7088A7B78160C253B680D645E25984BA5CCAB5CC5C327CA72137FC06 +MD5: 8FCCF31C2470D1ABA380AF196B611DEC +SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B +SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. 
Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.60-20240320.iso.sig securityonion-2.4.60-20240320.iso +gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Tue 19 Mar 2024 03:17:58 PM EDT using RSA key ID FE507013 +gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. 
diff --git a/sigs/securityonion-2.4.70-20240529.iso.sig b/sigs/securityonion-2.4.70-20240529.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..c3825eb6e72a860f67708c45b3926d7fdc90b650 GIT binary patch literal 566 zcmV-60?GY}0y6{v0SEvc79j-41gSkXz6^6dp_W8^5Ma0dP;e6k0%liCivS7<5PT3| zxBgIY6IO;00GUA4^4cz_ff*If5}A0oP7{X5>Z=GC&kVQad2loDR}h58qGE)=Rx^yp z-+xRh;q#;TL)Clt%_Pqf`(+ZrTTxv(K=m6LP%GfSK4WmEzCYmDGF{x`Aax-QCl14M?B(?m6}F< z8ytoSd0DJjf3dNdprN}#!)CDM%UoA1<6_PF(m7Ed~UxN<0?0bGK zYZmg%ne6j4P621BW!vQmH~#^`e1^aJc(KS3%i>sop?;h9?k z$XnSrS90)^;tYA8-5UvbUO{ZLYxgUGGhIA}2I}y$to?u$rExzvc+R*l{a!nVSe@G} zA9x@C>(y#~9Pmj<<|Xctul~dkY1TD+#W?!T-m;neL){fdtG)HlsOCQTaX0s?y^bC10U|My|kDl##27 Date: Wed, 29 May 2024 14:52:47 -0400 Subject: [PATCH 644/777] 2.4.70 --- DOWNLOAD_AND_VERIFY_ISO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index fcefce469..a5fd6e157 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,4 +1,4 @@ -### 2.4.70-20240529 ISO image released on 2024/03/20 +### 2.4.70-20240529 ISO image released on 2024/05/29 ### Download and Verify From 876d86048834c0dcbdf032f5f2cfa7fe61b52967 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 16:40:15 -0400 Subject: [PATCH 645/777] elastic agent should be able to communicate over 9092 for sending logs to kafka brokers Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/firewall/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 6dd3fead3..9d943ccab 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -77,6 +77,7 @@ firewall: elastic_agent_data: tcp: - 5055 + - 9092 udp: [] elastic_agent_update: tcp: From d9ec556061bbef5fe3a6215d0707bc6746182427 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: 
Wed, 29 May 2024 16:41:02 -0400 Subject: [PATCH 646/777] Update some annotations and defaults Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 7 ++++++- salt/kafka/soc_kafka.yaml | 10 ++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 9a8c05c43..56ad9252f 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -50,4 +50,9 @@ kafka: log_x_retention_x_hours: 168 log_x_segment_x_bytes: 1073741824 node_x_id: - process_x_roles: controller \ No newline at end of file + process_x_roles: controller + ssl_x_keystore_x_location: /etc/pki/kafka.p12 + ssl_x_keystore_x_type: PKCS12 + ssl_x_keystore_x_password: changeit + ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts + ssl_x_truststore_x_password: changeit \ No newline at end of file diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index ba673fa68..b1de1f243 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -1,6 +1,6 @@ kafka: enabled: - description: Enable or disable Kafka. + description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Join all receiver nodes to grid that will be converted to Kafka nodes, configure kafka_controllers with the hostnames of the nodes you want to act as controllers, and configure the default_replication_factor to the desired value for your redundancy needs. helpLink: kafka.html cluster_id: description: The ID of the Kafka cluster. @@ -13,7 +13,9 @@ kafka: sensitive: True helpLink: kafka.html kafka_controllers: - description: A list of Security Onion grid members that should act as KRaft controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. All other nodes will default to broker roles. 
+ description: A list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. + forcedType: "[]string" + multiline: True helpLink: kafka.html config: broker: @@ -27,7 +29,7 @@ kafka: forcedType: bool helpLink: kafka.html default_x_replication_x_factor: - description: The default replication factor for automatically created topics. + description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in kafka_controllers should not be counted towards total broker count. title: default.replication.factor forcedType: int helpLink: kafka.html @@ -198,7 +200,7 @@ kafka: forcedType: int helpLink: kafka.html process_x_roles: - description: The role performed by KRaft controller node. + description: The role performed by controller node. title: process.roles readonly: True helpLink: kafka.html \ No newline at end of file From 386be4e746b08ebf4642ec82a2f7cecb84efe6e6 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 16:48:39 -0400 Subject: [PATCH 647/777] WIP: Manage Kafka nodes pillar role value This way when kafka_controllers is updated the pillar value gets updated and any non-controllers get updated to revert to 'broker' only role. Needs more testing when a new controller joins in this manner Kafka errors due to cluster metadata being out of sync. One solution is to remove /nsm/kafka/data/__cluster_metadata-0/quorum-state and restart cluster. Alternative is working with Kafka cli tools to inform cluster of new voter, likely best option but requires a wrapper script of some sort to be created for updating cluster in-place. 
Easiest option is to have all receivers join grid and then configure Kafka with specific controllers via SOC UI prior to enabling Kafka. This way Kafka cluster comes up in the desired configuration with no need for immediately modifying cluster Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 19 ++++++------------- salt/kafka/nodes.map.jinja | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index 4e82eac42..e5b77db11 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -12,19 +12,12 @@ {# Create list of KRaft controllers #} {% set controllers = [] %} -{% if KAFKA_CONTROLLERS_PILLAR != none %} -{% for node in KAFKA_CONTROLLERS_PILLAR %} -{# Check that the user input for kafka_controllers pillar exists as a kafka:node value #} -{% if node in KAFKA_NODES_PILLAR %} -{% do controllers.append(KAFKA_NODES_PILLAR[node]['nodeid'] ~ '@' ~ node ~ ':9093') %} -{% endif %} -{% endfor %} -{% endif %} -{# Ensure in the event that the SOC controllers pillar has a single hostname and that hostname doesn't exist in kafka:nodes - that a controller is still set for the Kafka cluster. 
Defaulting to the grid manager #} -{% if controllers | length < 1 %} -{% do controllers.append(KAFKA_NODES_PILLAR[GLOBALS.manager]['nodeid'] ~ "@" ~ GLOBALS.manager ~ ":9093") %} -{% endif %} +{# Check for Kafka nodes with controller in process_x_roles #} +{% for node in KAFKA_NODES_PILLAR %} +{% if 'controller' in KAFKA_NODES_PILLAR[node].role %} +{% do controllers.append(KAFKA_NODES_PILLAR[node].nodeid ~ "@" ~ node ~ ":9093") %} +{% endif %} +{% endfor %} {% set kafka_controller_quorum_voters = ','.join(controllers) %} diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index 9b4979e92..e629ff783 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -8,6 +8,10 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %} +{# Set grid manager to default process_x_roles of broker,controller until overridden via SOC UI #} +{% if grains.role in ["so-standalone", "so-manager", "so-managersearch"] %} +{% set process_x_roles = 'broker,controller'%} +{% endif %} {% set current_kafkanodes = salt.saltutil.runner( 'mine.get', @@ -16,6 +20,7 @@ tgt_type='compound') %} {% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %} +{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} {% set existing_ids = [] %} @@ -43,10 +48,6 @@ {% set NEW_KAFKANODES = {} %} {% for minionid, ip in current_kafkanodes.items() %} {% set hostname = minionid.split('_')[0] %} -{# Override the default process_x_roles for manager and set to 'broker,controller'. 
Changes from SOC UI will overwrite this #} -{% if hostname == GLOBALS.manager %} -{% set process_x_roles = 'broker,controller' %} -{% endif %} {% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.items() %} {% set new_id = available_ids.pop(0) %} {% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %} @@ -67,3 +68,28 @@ {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} {% endif %} + +{# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #} +{% set ns = namespace(has_controller=false) %} +{% if KAFKA_CONTROLLERS_PILLAR != none %} +{% for hostname in KAFKA_CONTROLLERS_PILLAR %} +{% if hostname in COMBINED_KAFKANODES %} +{% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %} +{% set ns.has_controller = true %} +{% endif %} +{% endfor %} +{% for hostname in COMBINED_KAFKANODES %} +{% if hostname not in KAFKA_CONTROLLERS_PILLAR %} +{% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %} +{% endif %} +{% endfor %} +{# If the kafka_controllers_pillar is NOT empty check that atleast one node contains the controller role. 
+ otherwise default to GLOBALS.manager having broker,controller role #} +{% if not ns.has_controller %} +{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %} +{% endif %} +{# If kafka_controllers_pillar is empty, default to having grid manager as 'broker,controller' + so there is always atleast 1 controller in the cluster #} +{% else %} +{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %} +{% endif %} \ No newline at end of file From 62bdb2627aea9378f7c11ae9a72f04cbf2a14e96 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 29 May 2024 16:53:27 -0400 Subject: [PATCH 648/777] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index b3c5d8c27..d2587d896 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.70 +2.4.80 From 949cea95f4428f259fbaac39f02b6f22d1b14ef5 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 23:19:44 -0400 Subject: [PATCH 649/777] Update pillarWatch config for global.pipeline Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/salt/files/engines.conf | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index b12ba02ef..027521386 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -39,9 +39,20 @@ engines: from: '*': to: - '*': + 'KAFKA': - cmd.run: - cmd: salt-call saltutil.kill_all_jobs + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True - cmd.run: - cmd: salt-call state.highstate + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate + 'KAFKA': + to: + 'REDIS': + - 
cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate interval: 10 From 55c5ea5c4cf75f8c73e2762ce6382f0adbe9d120 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 30 May 2024 16:58:56 +0000 Subject: [PATCH 650/777] Add template for Suricata alerts --- salt/elasticsearch/defaults.yaml | 111 +++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index e54d58c3b..6ecdc96a1 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -11088,6 +11088,117 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-suricata_x_alerts: + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - 
dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - suricata-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + data_stream: {} + index_patterns: + - logs-suricata.alerts-* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + lifecycle: + name: so-suricata.alerts-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 1d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-syslog: index_sorting: false index_template: From e83135440198e8ed2bb0f79e420e6abd7d73e7be Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 30 May 2024 17:00:11 +0000 Subject: [PATCH 651/777] Add Suricata alerts setting for configuration --- salt/elasticsearch/soc_elasticsearch.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 000fd60b7..f56ed313e 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -521,6 +521,7 @@ elasticsearch: so-endgame: *indexSettings so-idh: *indexSettings so-suricata: *indexSettings + so-suricata_x_alerts: *indexSettings so-import: *indexSettings so-kratos: *indexSettings so-kismet: *indexSettings From 48713a4e7b1eeabe0568390359ce9450e3ef0130 Mon Sep 17 00:00:00 2001 From: reyesj2 
<94730068+reyesj2@users.noreply.github.com> Date: Thu, 30 May 2024 13:00:34 -0400 Subject: [PATCH 652/777] revert version for soup test before 2.4.80 pipeline unpaused Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d2587d896..b3c5d8c27 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.80 +2.4.70 From 2c635bce6201859256af9891c4ae76411e8d38de Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 30 May 2024 17:02:31 +0000 Subject: [PATCH 653/777] Set index for Suricata alerts --- salt/elasticsearch/files/ingest/suricata.alert | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/suricata.alert b/salt/elasticsearch/files/ingest/suricata.alert index 87d5144ed..68e0a5cb3 100644 --- a/salt/elasticsearch/files/ingest/suricata.alert +++ b/salt/elasticsearch/files/ingest/suricata.alert @@ -1,6 +1,7 @@ { "description" : "suricata.alert", "processors" : [ + { "set": { "field": "_index", "value": "logs-suricata.alerts-so" } }, { "set": { "field": "tags","value": "alert" }}, { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } }, { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } }, From 7702f05756cf83830013e41217a756aa87c5876d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 May 2024 15:00:32 -0400 Subject: [PATCH 654/777] upgrade salt 3006.8. 
soup for 2.4.80 --- salt/manager/tools/sbin/soup | 12 ++++++++++++ salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 525fce3f6..30f40ee5a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -358,6 +358,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50 [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60 [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70 + [[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80 true } @@ -375,6 +376,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 + [[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80 true } @@ -448,6 +450,11 @@ post_to_2.4.70() { POSTVERSION=2.4.70 } +post_to_2.4.80() { + echo "Nothing to apply" + POSTVERSION=2.4.80 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." 
@@ -595,6 +602,11 @@ up_to_2.4.70() { INSTALLEDVERSION=2.4.70 } +up_to_2.4.80() { + echo "Nothing to do for 2.4.80" + INSTALLEDVERSION=2.4.80 +} + add_detection_test_pillars() { if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then echo "Adding detection pillar values for automated testing" diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 19677f70b..24ba29d98 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -1,4 +1,4 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3006.6 + version: 3006.8 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index 2e4ebc93e..dddd6683b 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -1,6 +1,6 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3006.6 + version: 3006.8 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. 
From dbb99d03679f2593533e0cb806ba31b5bdbf91d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 30 May 2024 15:10:15 -0400 Subject: [PATCH 655/777] Remove bad config Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.map.jinja | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index e629ff783..fa33adda5 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -8,10 +8,6 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %} -{# Set grid manager to default process_x_roles of broker,controller until overridden via SOC UI #} -{% if grains.role in ["so-standalone", "so-manager", "so-managersearch"] %} -{% set process_x_roles = 'broker,controller'%} -{% endif %} {% set current_kafkanodes = salt.saltutil.runner( 'mine.get', From 00b5a5cc0c9270c434ed0e55e1e5611497652e69 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 30 May 2024 15:13:16 -0400 Subject: [PATCH 656/777] Revert "revert version for soup test before 2.4.80 pipeline unpaused" This reverts commit 48713a4e7b1eeabe0568390359ce9450e3ef0130. --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index b3c5d8c27..d2587d896 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.70 +2.4.80 From 85c269e69758402cc1976187ca346016b29eac2e Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 30 May 2024 15:59:03 -0600 Subject: [PATCH 657/777] Added TemplateDetections To Detection ClientParams The UI can now insert templates when you select a Detection language. These are those templates, annotated. 
--- salt/soc/defaults.yaml | 33 +++++++++++++++++++++++++++++++++ salt/soc/soc_soc.yaml | 13 ++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 9f5faf50b..f5628f3c3 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2253,3 +2253,36 @@ soc: severityTranslations: minor: low major: high + templateDetections: + suricata: | + alert tcp any any <> any any (msg:""; sid:[publicId];) + strelka: | + rule { + meta: + description = ""; + strings: + $x = \"string\"; + condition: + all of them; + } + elastalert: | + title: + id: [publicId] + status: + description: + references: + - + author: + date: + tags: + - + logsource: + product: + category: + detection: + selection: + condition: selection + falsepositives: + - + level: + diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 1f64eb0bc..47d051e4e 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -119,7 +119,7 @@ soc: advanced: True rulesRepos: default: &eerulesRepos - description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update." + description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. 
At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update." global: True advanced: True forcedType: "[]{}" @@ -319,6 +319,17 @@ soc: cases: *appSettings dashboards: *appSettings detections: *appSettings + detection: + templateDetections: + suricata: + description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id. + multiline: True + strelka: + description: The template used when creating a new Strelka detection. + multiline: True + elastalert: + description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id. + multiline: True grid: maxUploadSize: description: The maximum number of bytes for an uploaded PCAP import file. From e3ea4776c70cf472c195c2bd50d895ba3cd4d5d4 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 31 May 2024 13:34:28 -0400 Subject: [PATCH 658/777] Update kafka nodes pillar before running highstate with pillarwatch engine. 
This allows configuring your Kafka controllers before cluster comes up for the first time Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/salt/files/engines.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 027521386..69d596ed0 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -44,6 +44,8 @@ engines: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.apply kafka.nodes - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate 'KAFKA': From f396247838bd3c3cf8f3f0c49d20b500a5f4551b Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 31 May 2024 17:46:19 +0000 Subject: [PATCH 659/777] Add index templates and lifecycle policies --- salt/elasticsearch/defaults.yaml | 72 ++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 6ecdc96a1..36d673d70 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -170,6 +170,78 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-items: + index_sorting: false + index_template: + composed_of: + - so-items-mappings + index_patterns: + - .items-default-** + priority: 500 + template: + mappings: + date_detection: false + settings: + index: + lifecycle: + name: so-items-logs + rollover_alias: ".items-default" + routing: + allocation: + include: + _tier_preference: "data_content" + mapping: + total_fields: + limit: 10000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + hot: + actions: + rollover: 
+ max_size: 50gb + min_age: 0ms + so-lists: + index_sorting: false + index_template: + composed_of: + - so-lists-mappings + index_patterns: + - .lists-default-** + priority: 500 + template: + mappings: + date_detection: false + settings: + index: + lifecycle: + name: so-lists-logs + rollover_alias: ".lists-default" + routing: + allocation: + include: + _tier_preference: "data_content" + mapping: + total_fields: + limit: 10000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + hot: + actions: + rollover: + max_size: 50gb + min_age: 0ms so-case: index_sorting: false index_template: From a8c231ad8c59a09de4c134b6068d3f38bebe90d5 Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 31 May 2024 17:47:01 +0000 Subject: [PATCH 660/777] Add component templates --- .../elastic-agent/so-items-mappings.json | 112 ++++++++++++++++++ .../elastic-agent/so-lists-mappings.json | 55 +++++++++ 2 files changed, 167 insertions(+) create mode 100644 salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json create mode 100644 salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json diff --git a/salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json b/salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json new file mode 100644 index 000000000..85e6c1984 --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json @@ -0,0 +1,112 @@ +{ + "template": { + "mappings": { + "dynamic": "strict", + "properties": { + "binary": { + "type": "binary" + }, + "boolean": { + "type": "boolean" + }, + "byte": { + "type": "byte" + }, + "created_at": { + "type": "date" + }, + "created_by": { + "type": "keyword" + }, + "date": { + "type": "date" + }, + "date_nanos": { + "type": "date_nanos" + }, + "date_range": { + "type": "date_range" + }, + "deserializer": { + "type": "keyword" + }, + "double": { + "type": "double" + }, 
+ "double_range": { + "type": "double_range" + }, + "float": { + "type": "float" + }, + "float_range": { + "type": "float_range" + }, + "geo_point": { + "type": "geo_point" + }, + "geo_shape": { + "type": "geo_shape" + }, + "half_float": { + "type": "half_float" + }, + "integer": { + "type": "integer" + }, + "integer_range": { + "type": "integer_range" + }, + "ip": { + "type": "ip" + }, + "ip_range": { + "type": "ip_range" + }, + "keyword": { + "type": "keyword" + }, + "list_id": { + "type": "keyword" + }, + "long": { + "type": "long" + }, + "long_range": { + "type": "long_range" + }, + "meta": { + "type": "object", + "enabled": false + }, + "serializer": { + "type": "keyword" + }, + "shape": { + "type": "shape" + }, + "short": { + "type": "short" + }, + "text": { + "type": "text" + }, + "tie_breaker_id": { + "type": "keyword" + }, + "updated_at": { + "type": "date" + }, + "updated_by": { + "type": "keyword" + } + } + }, + "aliases": {} + }, + "version": 2, + "_meta": { + "managed": true, + "description": "default mappings for the .items index template installed by Kibana/Security" + } +} diff --git a/salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json b/salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json new file mode 100644 index 000000000..b2b5fda23 --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json @@ -0,0 +1,55 @@ +{ + "template": { + "mappings": { + "dynamic": "strict", + "properties": { + "created_at": { + "type": "date" + }, + "created_by": { + "type": "keyword" + }, + "description": { + "type": "keyword" + }, + "deserializer": { + "type": "keyword" + }, + "immutable": { + "type": "boolean" + }, + "meta": { + "type": "object", + "enabled": false + }, + "name": { + "type": "keyword" + }, + "serializer": { + "type": "keyword" + }, + "tie_breaker_id": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "updated_at": { + "type": "date" + }, + "updated_by": 
{ + "type": "keyword" + }, + "version": { + "type": "keyword" + } + } + }, + "aliases": {} + }, + "version": 2, + "_meta": { + "managed": true, + "description": "default mappings for the .lists index template installed by Kibana/Security" + } +} From 1a832fa0a5192d326f8c6ff0073dd8e5382cef9b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 31 May 2024 14:04:46 -0400 Subject: [PATCH 661/777] Move soup kafka needfuls to up_to_2.4.80 Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 8cb8fc6f5..95ca60b95 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -600,9 +600,13 @@ up_to_2.4.70() { toggle_telemetry add_detection_test_pillars + INSTALLEDVERSION=2.4.70 +} + +up_to_2.4.80() { # Kafka configuration changes - # Global pipeline changes to REDIS or KAFKA + # Global pipeline changes to REDIS or KAFKA echo "Removing global.pipeline pillar configuration" sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls # Kafka pillars @@ -615,11 +619,6 @@ up_to_2.4.70() { kafkapass=$(get_random_value) echo ' kafka_pass: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - INSTALLEDVERSION=2.4.70 -} - -up_to_2.4.80() { - echo "Nothing to do for 2.4.80" INSTALLEDVERSION=2.4.80 } From 2e85a28c0247a7c2303f029d2777c6bc6b0d4d46 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Sun, 2 Jun 2024 18:25:59 -0400 Subject: [PATCH 662/777] Remove so-kafka-clusterid script, created during soup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.sls | 9 +------ salt/manager/tools/sbin/so-kafka-clusterid | 29 ---------------------- 2 files changed, 1 insertion(+), 37 deletions(-) delete mode 100644 
salt/manager/tools/sbin/so-kafka-clusterid diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls index 7cafb10bc..cae2a1d0f 100644 --- a/salt/kafka/nodes.sls +++ b/salt/kafka/nodes.sls @@ -15,11 +15,4 @@ write_kafka_pillar_yaml: - source: salt://kafka/files/managed_node_pillar.jinja - template: jinja - context: - COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} - - -{% if kafka_cluster_id is none %} -generate_kafka_cluster_id: - cmd.run: - - name: /usr/sbin/so-kafka-clusterid -{% endif %} \ No newline at end of file + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid deleted file mode 100644 index c4e449448..000000000 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL GENERATED IN SETUP AND SOUP - - -local_salt_dir=/opt/so/saltstack/local - -if [[ -f /usr/sbin/so-common ]]; then - source /usr/sbin/so-common -else - source $(dirname $0)/../../../common/tools/sbin/so-common -fi - -if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi - -if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi \ No newline at end of file From c88b731793045bd957b28e4d519cab0cebfc2f34 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 3 Jun 2024 15:27:08 -0400 Subject: [PATCH 663/777] revert to 3006.6 --- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 24ba29d98..19677f70b 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -1,4 +1,4 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3006.8 + version: 3006.6 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index dddd6683b..2e4ebc93e 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -1,6 +1,6 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3006.8 + version: 3006.6 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. 
From d9c58d9333f24f695d6bc4a663c50cfe48683134 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 4 Jun 2024 08:33:45 -0400 Subject: [PATCH 664/777] update receiver pillar access Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/top.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/pillar/top.sls b/pillar/top.sls index 61b812cc8..f7ec39957 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -242,6 +242,7 @@ base: - kafka.nodes - kafka.soc_kafka - kafka.adv_kafka + - soc.license '*_import': - secrets From c0b2cf73883954d8e3feff579d7f19d2446c2120 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 4 Jun 2024 10:28:21 -0400 Subject: [PATCH 665/777] add the curlys --- salt/common/tools/sbin_jinja/so-tcpreplay | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin_jinja/so-tcpreplay b/salt/common/tools/sbin_jinja/so-tcpreplay index 6f3f02983..a9551c0fa 100755 --- a/salt/common/tools/sbin_jinja/so-tcpreplay +++ b/salt/common/tools/sbin_jinja/so-tcpreplay @@ -57,8 +57,8 @@ if ! docker ps | grep -q so-tcpreplay; then fi if is_sensor_node; then - echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface $REPLAYIFACE..." - docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i $REPLAYIFACE -M${REPLAYSPEED} $@" + echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..." + docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@" echo "Replay completed. Warnings shown above are typically expected." 
elif is_manager_node; then From fb1d4fdd3c1f58f4de6a100ec5f14d967aec5b92 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 4 Jun 2024 12:33:51 -0400 Subject: [PATCH 666/777] update license Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../so-elastic-fleet-outputs-update | 7 +++--- salt/kafka/disabled.sls | 8 +++++++ salt/kafka/elasticfleet.sls | 2 ++ salt/kafka/enabled.sls | 22 ++++++++++++++++++- salt/kafka/init.sls | 8 ++++++- 5 files changed, 42 insertions(+), 5 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 064d49d23..b5d6e1bfe 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -131,9 +131,10 @@ if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then # Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed printf "Checking if the correct output policy is set as default\n" OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON) - if [ "$OUTPUT_DEFAULT" = "false" ]; then + OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON) + if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then printf "Default output policy needs to be updated.\n" - {%- if GLOBALS.pipeline == "KAFKA" %} + {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %} update_kafka_outputs {%- else %} update_logstash_outputs @@ -145,7 +146,7 @@ if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then else printf "\nHashes don't match - update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" - {%- if GLOBALS.pipeline == "KAFKA" %} + {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %} update_kafka_outputs 
{%- else %} update_logstash_outputs diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls index 6658f0c5e..0027fbfb9 100644 --- a/salt/kafka/disabled.sls +++ b/salt/kafka/disabled.sls @@ -14,3 +14,11 @@ so-kafka_so-status.disabled: file.comment: - name: /opt/so/conf/so-status/so-status.conf - regex: ^so-kafka$ + +{% if grains.role in ['so-manager','so-managersearch','so-standalone'] %} +ensure_default_pipeline: + cmd.run: + - name: | + /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False; + /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pipeline REDIS +{% endif %} \ No newline at end of file diff --git a/salt/kafka/elasticfleet.sls b/salt/kafka/elasticfleet.sls index a91df765b..ae8899821 100644 --- a/salt/kafka/elasticfleet.sls +++ b/salt/kafka/elasticfleet.sls @@ -4,6 +4,8 @@ # Elastic License 2.0. {% from 'vars/globals.map.jinja' import GLOBALS %} +include: + - elasticfleet.enabled {# Create Kafka output policy if it doesn't exist #} update_kafka_output_policy_script: diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 833cc7f3c..e90a314d2 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -2,12 +2,19 @@ # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +# +# Note: Per the Elastic License 2.0, the second limitation states: +# +# "You may not move, change, disable, or circumvent the license key functionality +# in the software, and you may not remove or obscure any functionality in the +# software that is protected by the license key." 
{% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} +{% if 'gmd' in salt['pillar.get']('features', []) %} include: - elasticsearch.ca @@ -59,6 +66,19 @@ delete_so-kafka_so-status.disabled: - name: /opt/so/conf/so-status/so-status.conf - regex: ^so-kafka$ +{% else %} + +{{sls}}_no_license_detected: + test.fail_without_changes: + - name: {{sls}}_no_license_detected + - comment: + - "Kafka for Guaranteed Message Delivery is a feature supported only for customers with a valid license. + Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com + for more information about purchasing a license to enable this feature." +include: + - kafka.disabled +{% endif %} + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 67b66c45d..49707033e 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -1,7 +1,13 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +# +# Note: Per the Elastic License 2.0, the second limitation states: +# +# "You may not move, change, disable, or circumvent the license key functionality +# in the software, and you may not remove or obscure any functionality in the +# software that is protected by the license key." 
{% from 'kafka/map.jinja' import KAFKAMERGED %} {% from 'vars/globals.map.jinja' import GLOBALS %} From 3b0339a9b3196491dac1f9a95ed053b056ca1eb1 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:27:52 -0400 Subject: [PATCH 667/777] create kafka.id from kafka {partition}-{offset}-{timestamp} for tracking event Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index c3e70ec2c..a17b3a17a 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -84,6 +84,7 @@ { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, + { "set": { "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ From a2467d0418532cef20e9c13724b13ee1e4e8618f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Jun 2024 08:24:57 -0400 Subject: [PATCH 668/777] move so-tcpreplay to sensor state --- salt/sensor/init.sls | 19 ++++++++++++++++++- .../tools/sbin_jinja/so-tcpreplay | 0 2 files changed, 18 insertions(+), 1 deletion(-) rename salt/{common => 
sensor}/tools/sbin_jinja/so-tcpreplay (100%) diff --git a/salt/sensor/init.sls b/salt/sensor/init.sls index 53cd808c6..ca1cf13c2 100644 --- a/salt/sensor/init.sls +++ b/salt/sensor/init.sls @@ -9,4 +9,21 @@ execute_checksum: cmd.run: - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - onchanges: - - file: offload_script \ No newline at end of file + - file: offload_script + +sensor_sbin: + file.recurse: + - name: /usr/sbin + - source: salt://sensor/tools/sbin + - user: 939 + - group: 939 + - file_mode: 755 + +sensor_sbin_jinja: + file.recurse: + - name: /usr/sbin + - source: salt://sensor/tools/sbin_jinja + - user: 939 + - group: 939 + - file_mode: 755 + - template: jinja diff --git a/salt/common/tools/sbin_jinja/so-tcpreplay b/salt/sensor/tools/sbin_jinja/so-tcpreplay similarity index 100% rename from salt/common/tools/sbin_jinja/so-tcpreplay rename to salt/sensor/tools/sbin_jinja/so-tcpreplay From ff5773c8379d140cb6a239e89f4dd48672b15f7e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Jun 2024 08:56:32 -0400 Subject: [PATCH 669/777] move so-tcpreplay back to common. return empty string if no sensor.interface pillar --- .../tools/sbin_jinja/so-tcpreplay | 2 +- salt/sensor/init.sls | 18 +----------------- 2 files changed, 2 insertions(+), 18 deletions(-) rename salt/{sensor => common}/tools/sbin_jinja/so-tcpreplay (96%) diff --git a/salt/sensor/tools/sbin_jinja/so-tcpreplay b/salt/common/tools/sbin_jinja/so-tcpreplay similarity index 96% rename from salt/sensor/tools/sbin_jinja/so-tcpreplay rename to salt/common/tools/sbin_jinja/so-tcpreplay index a9551c0fa..969ca699f 100755 --- a/salt/sensor/tools/sbin_jinja/so-tcpreplay +++ b/salt/common/tools/sbin_jinja/so-tcpreplay @@ -10,7 +10,7 @@ . /usr/sbin/so-common . 
/usr/sbin/so-image-common -REPLAYIFACE=${REPLAYIFACE:-"{{pillar.sensor.interface}}"} +REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"} REPLAYSPEED=${REPLAYSPEED:-10} mkdir -p /opt/so/samples diff --git a/salt/sensor/init.sls b/salt/sensor/init.sls index ca1cf13c2..c9c6a6db5 100644 --- a/salt/sensor/init.sls +++ b/salt/sensor/init.sls @@ -10,20 +10,4 @@ execute_checksum: - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - onchanges: - file: offload_script - -sensor_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://sensor/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -sensor_sbin_jinja: - file.recurse: - - name: /usr/sbin - - source: salt://sensor/tools/sbin_jinja - - user: 939 - - group: 939 - - file_mode: 755 - - template: jinja + \ No newline at end of file From f6a8a21f94715786f6b645b6342de76a836d498a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Jun 2024 08:58:46 -0400 Subject: [PATCH 670/777] remove space --- salt/sensor/init.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/sensor/init.sls b/salt/sensor/init.sls index c9c6a6db5..730a7c7ad 100644 --- a/salt/sensor/init.sls +++ b/salt/sensor/init.sls @@ -10,4 +10,3 @@ execute_checksum: - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - onchanges: - file: offload_script - \ No newline at end of file From c4723263a485a5025d7793d088e00dc566d34bd0 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 6 Jun 2024 08:59:17 -0400 Subject: [PATCH 671/777] Remove unused kafka reactor Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/reactor/kafka.sls | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 salt/reactor/kafka.sls diff --git a/salt/reactor/kafka.sls b/salt/reactor/kafka.sls deleted file mode 100644 index 879fb5431..000000000 --- a/salt/reactor/kafka.sls +++ /dev/null @@ -1,16 +0,0 @@ -{# 
Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -https://securityonion.net/license; you may not use this file except in compliance with the -Elastic License 2.0. #} - -{% set minionid = data['id'].split('_')[0] %} -{% set role = data['data']['process_x_roles'] %} - -{# Run so-yaml to replace kafka.node..role with the value from kafka/controllers.sls #} - -update_global_kafka_pillar: - local.cmd.run: - - tgt: 'G@role:so-manager or G@role:so-managersearch or G@role:so-standalone' - - tgt_type: compound - - arg: - - '/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/nodes.sls kafka.nodes.{{ minionid }}.role {{ role }}' \ No newline at end of file From ccd6b3914cda395999a0fc3819b9ed0cd0ef569d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 10:33:55 -0400 Subject: [PATCH 672/777] add final msg queue for soup. --- salt/manager/tools/sbin/soup | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 02c01920d..258c09ed6 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -19,6 +19,9 @@ SOUP_LOG=/root/soup.log WHATWOULDYOUSAYYAHDOHERE=soup whiptail_title='Security Onion UPdater' NOTIFYCUSTOMELASTICCONFIG=false +# used to display messages to the user at the end of soup +declare -a FINAL_MESSAGE_QUEUE=() + check_err() { local exit_code=$1 @@ -344,6 +347,22 @@ masterunlock() { mv -v $BACKUPTOPFILE $TOPFILE } +phases_pillar_2_4_80() { + echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" + + #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases + #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE + read 
-r -d '' msg << EOM + Found elasticsearch.index_settings.global_overrides.index_template.phases set to: + so-yaml removed stuff here + A backup of all pillars was saved to /nsm/backup/ + Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases + If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases" + EOM + + FINAL_MESSAGE_QUEUE+=("$msg") +} + preupgrade_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if changes are needed." @@ -603,7 +622,7 @@ up_to_2.4.70() { } up_to_2.4.80() { - echo "Nothing to do for 2.4.80" + phases_pillar_2_4_80 INSTALLEDVERSION=2.4.80 } @@ -1267,6 +1286,14 @@ EOF fi +# check if the FINAL_MESSAGE_QUEUE is not empty +if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then + for m in "${FINAL_MESSAGE_QUEUE[@]}"; do + echo "$m" + echo + done +fi + echo "### soup has been served at $(date) ###" } From 6920b77b4a3425fd3eb8f6a5316d702d5f6cbf1f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 11:00:43 -0400 Subject: [PATCH 673/777] fix msg --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 258c09ed6..c510e832b 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -352,13 +352,13 @@ phases_pillar_2_4_80() { #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE - read -r -d '' msg << EOM + read -r -d '' msg << EOF Found elasticsearch.index_settings.global_overrides.index_template.phases set to: so-yaml removed stuff here A backup of all pillars was saved to /nsm/backup/ Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - If 
you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases" - EOM + If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases +EOF FINAL_MESSAGE_QUEUE+=("$msg") } From 5600fed9c4854f06e98c9aed31d0ce0550d13936 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 6 Jun 2024 11:56:07 -0400 Subject: [PATCH 674/777] add ability to retrieve yaml values via so-yaml.py; improve so-minion id matching --- pyci.sh | 14 +-- salt/manager/tools/sbin/so-minion | 4 +- salt/manager/tools/sbin/so-yaml.py | 74 +++++++++++----- salt/manager/tools/sbin/so-yaml_test.py | 111 +++++++++++++++++++----- 4 files changed, 149 insertions(+), 54 deletions(-) diff --git a/pyci.sh b/pyci.sh index e85287063..8cbee5e75 100755 --- a/pyci.sh +++ b/pyci.sh @@ -15,12 +15,16 @@ TARGET_DIR=${1:-.} PATH=$PATH:/usr/local/bin -if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then - echo "Missing dependencies. Consider running the following command:" - echo " python -m pip install flake8 pytest pytest-cov" +if [ ! -d .venv ]; then + python -m venv .venv +fi + +source .venv/bin/activate + +if ! pip install flake8 pytest pytest-cov pyyaml; then + echo "Unable to install dependencies." exit 1 fi -pip install pytest pytest-cov flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini" -python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" \ No newline at end of file +python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 8b563ef1d..da1a6d2a2 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -112,8 +112,8 @@ function testMinion() { result=$? 
# If this so-minion script is not running on the given minion ID, run so-test remotely on the sensor as well - local_id=$(lookup_grain id) - if [[ ! "$local_id" =~ "${MINION_ID}_" ]]; then + local_id=$(lookup_grain id) + if [[ ! "$local_id" =~ "${MINION_ID}_" && "$local_id" != "${MINION_ID}" ]]; then salt "$MINION_ID" cmd.run 'so-test' result=$? fi diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 5427a2e48..cddc827b5 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -14,19 +14,20 @@ lockFile = "/tmp/so-yaml.lock" def showUsage(args): - print('Usage: {} [ARGS...]'.format(sys.argv[0])) - print(' General commands:') - print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.') - print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.') - print(' remove - Removes a yaml key, if it exists. Requires KEY arg.') - print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.') - print(' help - Prints this usage information.') - print('') - print(' Where:') - print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml') - print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2') - print(' VALUE - Value to set for a given key') - print(' LISTITEM - Item to append to a given key\'s list value') + print('Usage: {} [ARGS...]'.format(sys.argv[0]), file=sys.stderr) + print(' General commands:', file=sys.stderr) + print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr) + print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr) + print(' get - Displays (to stdout) the value stored in the given key. 
Requires KEY arg.', file=sys.stderr) + print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr) + print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr) + print(' help - Prints this usage information.', file=sys.stderr) + print('', file=sys.stderr) + print(' Where:', file=sys.stderr) + print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr) + print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr) + print(' VALUE - Value to set for a given key', file=sys.stderr) + print(' LISTITEM - Item to append to a given key\'s list value', file=sys.stderr) sys.exit(1) @@ -38,7 +39,7 @@ def loadYaml(filename): def writeYaml(filename, content): file = open(filename, "w") - return yaml.dump(content, file) + return yaml.safe_dump(content, file) def appendItem(content, key, listItem): @@ -49,15 +50,15 @@ def appendItem(content, key, listItem): try: content[key].append(listItem) except AttributeError: - print("The existing value for the given key is not a list. No action was taken on the file.") + print("The existing value for the given key is not a list. No action was taken on the file.", file=sys.stderr) return 1 except KeyError: - print("The key provided does not exist. No action was taken on the file.") + print("The key provided does not exist. No action was taken on the file.", file=sys.stderr) return 1 def convertType(value): - if len(value) > 0 and (not value.startswith("0") or len(value) == 1): + if isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1): if "." 
in value: try: value = float(value) @@ -83,7 +84,7 @@ def append(args): if len(args) != 3: print('Missing filename, key arg, or list item to append', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -112,7 +113,7 @@ def add(args): if len(args) != 3: print('Missing filename, key arg, and/or value', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -137,7 +138,7 @@ def remove(args): if len(args) != 2: print('Missing filename or key arg', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -153,7 +154,7 @@ def replace(args): if len(args) != 3: print('Missing filename, key arg, and/or value', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -167,6 +168,32 @@ def replace(args): return 0 +def getKeyValue(content, key): + pieces = key.split(".", 1) + if len(pieces) > 1: + return getKeyValue(content[pieces[0]], pieces[1]) + return content.get(key, None) + + +def get(args): + if len(args) != 2: + print('Missing filename or key arg', file=sys.stderr) + showUsage(None) + return 1 + + filename = args[0] + key = args[1] + + content = loadYaml(filename) + output = getKeyValue(content, key) + if output is None: + print("Not found", file=sys.stderr) + return 2 + + print(yaml.safe_dump(output)) + return 0 + + def main(): args = sys.argv[1:] @@ -178,6 +205,7 @@ def main(): "help": showUsage, "add": add, "append": append, + "get": get, "remove": remove, "replace": replace, } @@ -195,11 +223,11 @@ def main(): break except Exception: if lockAttempts == 1: - print("Waiting for lock file to be released from another process...") + print("Waiting for lock file to be released from another process...", file=sys.stderr) time.sleep(2) if lockAttempts == maxAttempts: - print("Lock file (" + lockFile + ") could not be created; proceeding without lock.") + print("Lock file (" + lockFile + ") could not be created; proceeding without lock.", 
file=sys.stderr) cmd = commands.get(args[0], showUsage) code = cmd(args[1:]) diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index 7effabac9..ca9839e02 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -15,40 +15,40 @@ class TestRemove(unittest.TestCase): def test_main_missing_input(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd"] soyaml.main() sysmock.assert_called_once_with(1) - self.assertIn(mock_stdout.getvalue(), "Usage:") + self.assertIn("Usage:", mock_stderr.getvalue()) def test_main_help_locked(self): filename = "/tmp/so-yaml.lock" file = open(filename, "w") file.write = "fake lock file" with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: with patch('time.sleep', new=MagicMock()) as mock_sleep: sys.argv = ["cmd", "help"] soyaml.main() sysmock.assert_called() mock_sleep.assert_called_with(2) - self.assertIn(mock_stdout.getvalue(), "Usage:") + self.assertIn("Usage:", mock_stderr.getvalue()) def test_main_help(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.main() sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Usage:") + self.assertIn("Usage:", mock_stderr.getvalue()) def test_remove_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.remove(["file"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + 
self.assertIn("Missing filename or key arg\n", mock_stderr.getvalue()) def test_remove(self): filename = "/tmp/so-yaml_test-remove.yaml" @@ -97,7 +97,7 @@ class TestRemove(unittest.TestCase): def test_remove_missing_args(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: filename = "/tmp/so-yaml_test-remove.yaml" file = open(filename, "w") file.write("{key1: { child1: 123, child2: abc }, key2: false}") @@ -112,15 +112,15 @@ class TestRemove(unittest.TestCase): expected = "{key1: { child1: 123, child2: abc }, key2: false}" self.assertEqual(actual, expected) sysmock.assert_called_once_with(1) - self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + self.assertIn("Missing filename or key arg\n", mock_stderr.getvalue()) def test_append_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.append(["file", "key"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n") + self.assertIn("Missing filename, key arg, or list item to append\n", mock_stderr.getvalue()) def test_append(self): filename = "/tmp/so-yaml_test-remove.yaml" @@ -173,11 +173,11 @@ class TestRemove(unittest.TestCase): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key4", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n") + self.assertEqual("The key provided does not exist. 
No action was taken on the file.\n", mock_stderr.getvalue()) def test_append_key_noexist_deep(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -186,11 +186,11 @@ class TestRemove(unittest.TestCase): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n") + self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue()) def test_append_key_nonlist(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -199,11 +199,11 @@ class TestRemove(unittest.TestCase): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key1", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n") + self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue()) def test_append_key_nonlist_deep(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -212,11 +212,11 @@ class TestRemove(unittest.TestCase): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. 
No action was taken on the file.\n") + self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue()) def test_add_key(self): content = {} @@ -244,11 +244,11 @@ class TestRemove(unittest.TestCase): def test_add_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.add(["file", "key"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + self.assertIn("Missing filename, key arg, and/or value\n", mock_stderr.getvalue()) def test_add(self): filename = "/tmp/so-yaml_test-add.yaml" @@ -296,11 +296,11 @@ class TestRemove(unittest.TestCase): def test_replace_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.replace(["file", "key"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + self.assertIn("Missing filename, key arg, and/or value\n", mock_stderr.getvalue()) def test_replace(self): filename = "/tmp/so-yaml_test-add.yaml" @@ -360,3 +360,66 @@ class TestRemove(unittest.TestCase): self.assertEqual(soyaml.convertType("false"), False) self.assertEqual(soyaml.convertType("FALSE"), False) self.assertEqual(soyaml.convertType(""), "") + + def test_get_int(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child2.deep1"]) + self.assertEqual(result, 0) + self.assertIn("45\n...", mock_stdout.getvalue()) + + def test_get_str(self): + 
with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child2.deep1"]) + self.assertEqual(result, 0) + self.assertIn("hello\n...", mock_stdout.getvalue()) + + def test_get_list(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key3"]) + self.assertEqual(result, 0) + self.assertIn("- e\n- f\n- g\n", mock_stdout.getvalue()) + + def test_get_dict(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1"]) + self.assertEqual(result, 0) + self.assertIn("child1: 123\nchild2:\n deep1: hello\n", mock_stdout.getvalue()) + + def test_get_missing(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child2.deep3"]) + self.assertEqual(result, 2) + self.assertEqual("", mock_stdout.getvalue()) + + def test_get_usage(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + result = soyaml.get([]) + self.assertEqual(result, 1) + self.assertIn("Missing filename or key arg", mock_stderr.getvalue()) + sysmock.assert_called_once_with(1) From a39c88c7b4c678aad35edd9c2c3e1fcff76a1dc4 Mon Sep 17 00:00:00 2001 From: 
m0duspwnens Date: Thu, 6 Jun 2024 12:56:24 -0400 Subject: [PATCH 675/777] add set to troubleshoot failure --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c510e832b..c09db0626 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -350,6 +350,7 @@ masterunlock() { phases_pillar_2_4_80() { echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" + set +e #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE read -r -d '' msg << EOF @@ -361,6 +362,7 @@ phases_pillar_2_4_80() { EOF FINAL_MESSAGE_QUEUE+=("$msg") + set -e } preupgrade_changes() { From e85c3e5b27b026238d7ebf9bb39f23f2aa6fc97a Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Wed, 5 Jun 2024 14:45:06 -0600 Subject: [PATCH 676/777] SOC Proxy Setting The so_proxy value we build during install is now copied to SOC's config. --- salt/soc/defaults.yaml | 1 + salt/soc/merged.map.jinja | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f5628f3c3..65fb450d9 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,6 +1286,7 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads + proxy: '' modules: cases: soc filedatastore: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index f2c88fde9..4ee0eea1e 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -1,5 +1,5 @@ {# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} @@ -11,6 +11,9 @@ {% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %} +{% set MANAGER_PROXY = salt['pillar.get']('manager:proxy', '') %} +{% do SOCMERGED.config.server.update({'proxy': MANAGER_PROXY}) %} + {# if SOCMERGED.config.server.modules.cases == httpcase details come from the soc pillar #} {% if SOCMERGED.config.server.modules.cases != 'soc' %} {% do SOCMERGED.config.server.modules.elastic.update({'casesEnabled': false}) %} From 42818a9950bb7832e08099aa243fcafd71c8f75f Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 6 Jun 2024 13:28:07 -0600 Subject: [PATCH 677/777] Remove proxy from SOC defaults --- salt/soc/defaults.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 65fb450d9..f5628f3c3 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,7 +1286,6 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - proxy: '' modules: cases: soc filedatastore: From f37f5ba97b4fa1b79de7d1325f639db3d7f94fc5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 6 Jun 2024 15:57:58 -0400 Subject: [PATCH 678/777] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index e157ff852..1ecabacd8 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -150,13 +150,16 @@ suricata: helpLink: suricata.html vars: address-groups: - HOME_NET: &suriaddressgroup + HOME_NET: description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. 
This allows for a single adjustment to the variable that will then affect all rules referencing the variable. regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. helpLink: suricata.html duplicates: True - EXTERNAL_NET: *suriaddressgroup + EXTERNAL_NET: &suriaddressgroup + description: Assign a list of hosts, or networks, or other customization, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. 
+ helpLink: suricata.html + duplicates: True HTTP_SERVERS: *suriaddressgroup SMTP_SERVERS: *suriaddressgroup SQL_SERVERS: *suriaddressgroup From d3b81babec949fc9631de3be24768ec1971389ea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 16:15:21 -0400 Subject: [PATCH 679/777] check for phases with so-yaml, remove if exists --- salt/manager/tools/sbin/soup | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c09db0626..1850c2b9b 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -349,20 +349,24 @@ masterunlock() { phases_pillar_2_4_80() { echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" - - set +e - #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases - #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE - read -r -d '' msg << EOF - Found elasticsearch.index_settings.global_overrides.index_template.phases set to: - so-yaml removed stuff here - A backup of all pillars was saved to /nsm/backup/ - Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases + PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases) + case $? 
in + 0) + so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases + set +e + read -r -d '' msg << EOF + Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: + ${PHASES} + Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases + If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases + A backup of all pillars was saved to /nsm/backup/ EOF - - FINAL_MESSAGE_QUEUE+=("$msg") - set -e + FINAL_MESSAGE_QUEUE+=("$msg") + set -e + ;; + 2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;; + *) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this + esac } preupgrade_changes() { From d39c8fae54abfb4625a0de35de2d4ee3b0d7ac83 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Jun 2024 09:01:16 -0400 Subject: [PATCH 680/777] format output --- salt/manager/tools/sbin/soup | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 1850c2b9b..81a7545d7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -354,12 +354,13 @@ phases_pillar_2_4_80() { 0) so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases set +e - read -r -d '' msg << EOF - Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: - ${PHASES} - Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases - A backup of all pillars was saved to /nsm/backup/ + 
read -r -d '' msg <<- EOF + Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: + ${PHASES} + + Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases + To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases + A backup of all pillar files was saved to /nsm/backup/ EOF FINAL_MESSAGE_QUEUE+=("$msg") set -e From f5cc35509b48bbb944b4922a94105193c89e545e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Jun 2024 11:03:26 -0400 Subject: [PATCH 681/777] fix output alignment --- salt/manager/tools/sbin/soup | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 81a7545d7..0ab8d9d46 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -355,12 +355,12 @@ phases_pillar_2_4_80() { so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases set +e read -r -d '' msg <<- EOF - Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: - ${PHASES} +Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: +${PHASES} - Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases - A backup of all pillar files was saved to /nsm/backup/ +Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases +To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases +A backup of all pillar files was saved to /nsm/backup/ EOF FINAL_MESSAGE_QUEUE+=("$msg") set -e From fa063722e102cc07da2f076f64375f061bfea2ad Mon Sep 17 00:00:00 2001 
From: Corey Ogburn Date: Thu, 6 Jun 2024 16:36:09 -0600 Subject: [PATCH 682/777] RootCA and InsecureSkipVerify New empty settings and their annotations. --- salt/soc/defaults.yaml | 2 ++ salt/soc/soc_soc.yaml | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f5628f3c3..03476c3f5 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,6 +1286,8 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads + rootCA: '' + insecureSkipVerify: false modules: cases: soc filedatastore: diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 47d051e4e..ec633f773 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -81,6 +81,14 @@ soc: description: Maximum number of packets to show in the PCAP viewer. Larger values can cause more resource utilization on both the SOC server and the browser. global: True advanced: True + rootCA: + description: Root Certificate Authority (CA) public key in PEM format that SOC will use to validate outgoing requests. This is useful when the SOC server is behind a reverse proxy that performs SSL termination. + multiline: True + advanced: True + insecureSkipVerify: + description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes. + advanced: True + forcedType: bool modules: elastalertengine: additionalAlerters: From 5d3fd3d389b7ed5b751d0229153c05461966f472 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 7 Jun 2024 12:47:09 -0600 Subject: [PATCH 683/777] AdditionalCA and InsecureSkipVerify New fields have been added to manager and then duplicated over to SOC's config in the same vein as how proxy was updated earlier this week. AdditionalCA holds the PEM formatted public keys that should be trusted when making requests. It has been implemented for both Sigma's zip downloads and Sigma and Suricata's repository clones and pulls. 
InsecureSkipVerify has been added to help our users troubleshoot their configuration. Setting it to true will not verify the cert on outgoing requests. Self signed, missing, or invalid certs will not throw an error. --- salt/manager/defaults.yaml | 4 +++- salt/manager/map.jinja | 7 +++++++ salt/manager/soc_manager.yaml | 18 +++++++++++++++--- salt/soc/merged.map.jinja | 6 ++++-- salt/soc/soc_soc.yaml | 8 -------- 5 files changed, 29 insertions(+), 14 deletions(-) create mode 100644 salt/manager/map.jinja diff --git a/salt/manager/defaults.yaml b/salt/manager/defaults.yaml index 8bb34690e..708900af6 100644 --- a/salt/manager/defaults.yaml +++ b/salt/manager/defaults.yaml @@ -2,4 +2,6 @@ manager: reposync: enabled: True hour: 3 - minute: 0 \ No newline at end of file + minute: 0 + additionalCA: '' + insecureSkipVerify: False diff --git a/salt/manager/map.jinja b/salt/manager/map.jinja new file mode 100644 index 000000000..1ab9c12c3 --- /dev/null +++ b/salt/manager/map.jinja @@ -0,0 +1,7 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + +{% import_yaml 'manager/defaults.yaml' as MANAGERDEFAULTS %} +{% set MANAGERMERGED = salt['pillar.get']('manager', MANAGERDEFAULTS.manager, merge=True) %} \ No newline at end of file diff --git a/salt/manager/soc_manager.yaml b/salt/manager/soc_manager.yaml index f6461a0c7..f3346269e 100644 --- a/salt/manager/soc_manager.yaml +++ b/salt/manager/soc_manager.yaml @@ -7,7 +7,7 @@ manager: hour: description: The hour of the day in which the repo sync takes place. global: True - helpLink: soup.html + helpLink: soup.html minute: description: The minute within the hour to run the repo sync. 
global: True @@ -16,11 +16,23 @@ manager: description: Enable elastalert 1=enabled 0=disabled. global: True helpLink: elastalert.html - no_proxy: - description: String of hosts to ignore the proxy settings for. + no_proxy: + description: String of hosts to ignore the proxy settings for. global: True helpLink: proxy.html proxy: description: Proxy server to use for updates. global: True helpLink: proxy.html + additionalCA: + description: Additional CA certificates to trust in PEM format. + global: True + advanced: True + multiline: True + helpLink: proxy.html + insecureSkipVerify: + description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes. + advanced: True + forcedType: bool + global: True + helpLink: proxy.html diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 4ee0eea1e..c823175cb 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -6,13 +6,15 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'soc/defaults.map.jinja' import SOCDEFAULTS with context %} {% from 'logstash/map.jinja' import LOGSTASH_NODES %} +{% from 'manager/map.jinja' import MANAGERMERGED %} {% set DOCKER_EXTRA_HOSTS = LOGSTASH_NODES %} {% do DOCKER_EXTRA_HOSTS.append({GLOBALS.influxdb_host:pillar.node_data[GLOBALS.influxdb_host].ip}) %} {% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %} -{% set MANAGER_PROXY = salt['pillar.get']('manager:proxy', '') %} -{% do SOCMERGED.config.server.update({'proxy': MANAGER_PROXY}) %} +{% do SOCMERGED.config.server.update({'proxy': MANAGERMERGED.proxy}) %} +{% do SOCMERGED.config.server.update({'additionalCA': MANAGERMERGED.additionalCA}) %} +{% do SOCMERGED.config.server.update({'insecureSkipVerify': MANAGERMERGED.insecureSkipVerify}) %} {# if SOCMERGED.config.server.modules.cases == httpcase details come from the soc pillar #} {% if SOCMERGED.config.server.modules.cases != 'soc' %} 
diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index ec633f773..47d051e4e 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -81,14 +81,6 @@ soc: description: Maximum number of packets to show in the PCAP viewer. Larger values can cause more resource utilization on both the SOC server and the browser. global: True advanced: True - rootCA: - description: Root Certificate Authority (CA) public key in PEM format that SOC will use to validate outgoing requests. This is useful when the SOC server is behind a reverse proxy that performs SSL termination. - multiline: True - advanced: True - insecureSkipVerify: - description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes. - advanced: True - forcedType: bool modules: elastalertengine: additionalAlerters: From ee696be51d5fd01276aa8db15b26af4a3a44d40c Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 7 Jun 2024 13:04:54 -0600 Subject: [PATCH 684/777] Remove rootCA and insecureSkipVerify from SOC defaults --- salt/soc/defaults.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 03476c3f5..f5628f3c3 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,8 +1286,6 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - rootCA: '' - insecureSkipVerify: false modules: cases: soc filedatastore: From dbc56ffee787feb6f124db5e54bbd66b7c87734f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Jun 2024 15:09:09 -0400 Subject: [PATCH 685/777] Update defaults.yaml --- salt/firewall/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index b10505956..0f7ce911a 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1293,6 +1293,9 @@ firewall: beats_endpoint_ssl: portgroups: - beats_5644 + 
elastic_agent_endpoint: + portgroups: + - elastic_agent_data endgame: portgroups: - endgame From 4057238185aad7a4c8c58b226e25f23b84ef09bb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Jun 2024 15:33:49 -0400 Subject: [PATCH 686/777] Update defaults.yaml --- salt/firewall/defaults.yaml | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 0f7ce911a..5d53b9864 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1267,26 +1267,37 @@ firewall: chain: DOCKER-USER: hostgroups: + desktop: + portgroups: + - elastic_agent_data fleet: portgroups: - - beats_5056 + - elastic_agent_data + idh: + portgroups: + - elastic_agent_data sensor: portgroups: - - beats_5044 - - beats_5644 - elastic_agent_data searchnode: portgroups: - redis - - beats_5644 + - elastic_agent_data + standalone: + portgroups: + - redis + - elastic_agent_data + manager: + portgroups: + - elastic_agent_data managersearch: portgroups: - redis - - beats_5644 + - elastic_agent_data self: portgroups: - redis - - beats_5644 + - elastic_agent_data beats_endpoint: portgroups: - beats_5044 From 0139e1827113b3ef81a20487409145116fdd46f6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Jun 2024 16:03:21 -0400 Subject: [PATCH 687/777] additional description --- salt/manager/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0ab8d9d46..6adb39f2f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1295,6 +1295,7 @@ EOF # check if the FINAL_MESSAGE_QUEUE is not empty if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then +echo "The following additional information specifically applies to your grid:\n" for m in "${FINAL_MESSAGE_QUEUE[@]}"; do echo "$m" echo From f2f688b9b8b1e5eb1467eb140efbba6df590d87e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 7 Jun 2024 16:18:09 -0400 
Subject: [PATCH 688/777] Update soup --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 6adb39f2f..0d52e5c16 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1295,7 +1295,7 @@ EOF # check if the FINAL_MESSAGE_QUEUE is not empty if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then -echo "The following additional information specifically applies to your grid:\n" + echo "The following additional information applies specifically to your grid:\n" for m in "${FINAL_MESSAGE_QUEUE[@]}"; do echo "$m" echo From f96b82b11203e7ca400f5dfa131556fc075799e2 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 8 Jun 2024 07:44:46 -0400 Subject: [PATCH 689/777] gracefully handle missing parent key --- salt/manager/tools/sbin/so-yaml.py | 2 +- salt/manager/tools/sbin/so-yaml_test.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index cddc827b5..275032ee0 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -170,7 +170,7 @@ def replace(args): def getKeyValue(content, key): pieces = key.split(".", 1) - if len(pieces) > 1: + if len(pieces) > 1 and pieces[0] in content: return getKeyValue(content[pieces[0]], pieces[1]) return content.get(key, None) diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index ca9839e02..5ca46cb68 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -416,6 +416,17 @@ class TestRemove(unittest.TestCase): self.assertEqual(result, 2) self.assertEqual("", mock_stdout.getvalue()) + def test_get_missing_parent(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { 
deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child3.deep3"]) + self.assertEqual(result, 2) + self.assertEqual("", mock_stdout.getvalue()) + def test_get_usage(self): with patch('sys.exit', new=MagicMock()) as sysmock: with patch('sys.stderr', new=StringIO()) as mock_stderr: From f1638faa3a669c91f93e866a34ce19424fb34b7f Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 8 Jun 2024 08:18:34 -0400 Subject: [PATCH 690/777] correct placement of error check override --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0d52e5c16..9ee4058f9 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -349,11 +349,11 @@ masterunlock() { phases_pillar_2_4_80() { echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" + set +e PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases) case $? in 0) so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases - set +e read -r -d '' msg <<- EOF Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: ${PHASES} @@ -363,11 +363,11 @@ To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.inde A backup of all pillar files was saved to /nsm/backup/ EOF FINAL_MESSAGE_QUEUE+=("$msg") - set -e ;; 2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." 
;; *) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this esac + set -e } preupgrade_changes() { From 284c1be85fc058945e772b5ee02898d13594e070 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 10 Jun 2024 11:08:54 -0400 Subject: [PATCH 691/777] Update Kafka controller(s) via SOC UI Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 2 -- salt/kafka/config.sls | 6 ++++++ salt/kafka/defaults.yaml | 2 +- salt/kafka/enabled.sls | 2 -- salt/kafka/nodes.map.jinja | 5 +++-- salt/kafka/soc_kafka.yaml | 5 ++--- salt/salt/files/engines.conf | 18 ++++++++++++++++++ 7 files changed, 30 insertions(+), 10 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index e5b77db11..88d27c1a8 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -7,8 +7,6 @@ {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} -{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} - {# Create list of KRaft controllers #} {% set controllers = [] %} diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 5cf6f8201..165daf7eb 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -66,6 +66,12 @@ kafka_kraft_{{sc}}_properties: - show_changes: False {% endfor %} +reset_quorum_on_changes: + cmd.run: + - name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state + - watch: + - file: /opt/so/conf/kafka/server.properties + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 56ad9252f..f45560e60 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -2,7 +2,7 @@ kafka: enabled: False cluster_id: kafka_pass: - kafka_controllers: [] + kafka_controllers: config: broker: advertised_x_listeners: diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index e90a314d2..75cf71148 
100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -53,9 +53,7 @@ so-kafka: - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/log/kafka:/opt/kafka/logs/:rw - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro - {% if GLOBALS.is_manager %} - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties - {% endif %} - watch: {% for sc in ['server', 'client'] %} - file: kafka_kraft_{{sc}}_properties diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index fa33adda5..c0b98de14 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -68,14 +68,15 @@ {# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #} {% set ns = namespace(has_controller=false) %} {% if KAFKA_CONTROLLERS_PILLAR != none %} -{% for hostname in KAFKA_CONTROLLERS_PILLAR %} +{% set KAFKA_CONTROLLERS_PILLAR_LIST = KAFKA_CONTROLLERS_PILLAR.split(',') %} +{% for hostname in KAFKA_CONTROLLERS_PILLAR_LIST %} {% if hostname in COMBINED_KAFKANODES %} {% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %} {% set ns.has_controller = true %} {% endif %} {% endfor %} {% for hostname in COMBINED_KAFKANODES %} -{% if hostname not in KAFKA_CONTROLLERS_PILLAR %} +{% if hostname not in KAFKA_CONTROLLERS_PILLAR_LIST %} {% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %} {% endif %} {% endfor %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index b1de1f243..05f047c4a 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -13,9 +13,8 @@ kafka: sensitive: True helpLink: kafka.html kafka_controllers: - description: A list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. 
Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. - forcedType: "[]string" - multiline: True + description: A comma-seperated list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. + forcedType: "string" helpLink: kafka.html config: broker: diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 69d596ed0..de5685fff 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -57,4 +57,22 @@ engines: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate + - files: + - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls + pillar: kafka.kafka_controllers + default: '' + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.apply kafka.nodes + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka + - cmd.run: + cmd: salt-call state.apply elasticfleet interval: 10 From adeab10f6d88bdf733e7b7284178eb172fbe95d9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 12:14:27 -0400 Subject: [PATCH 692/777] upgrade docker and containerd.io for oracle --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff 
--git a/salt/docker/init.sls b/salt/docker/init.sls index 769c58af8..281fbc4ac 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -51,10 +51,10 @@ dockerheldpackages: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.6.21-3.1.el9 - - docker-ce: 24.0.4-1.el9 - - docker-ce-cli: 24.0.4-1.el9 - - docker-ce-rootless-extras: 24.0.4-1.el9 + - containerd.io: 1.6.33-3.1.el9 + - docker-ce: 26.1.4-1.el9 + - docker-ce-cli: 26.1.4-1.el9 + - docker-ce-rootless-extras: 26.1.4-1.el9 - hold: True - update_holds: True {% endif %} From c6d0a1766937ebc7ae7425e7cba2d23271c93c17 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 15:43:29 -0400 Subject: [PATCH 693/777] docker upgrade debian 12 --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 281fbc4ac..732a9d7dd 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -20,10 +20,10 @@ dockergroup: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.6.21-1 - - docker-ce: 5:24.0.3-1~debian.12~bookworm - - docker-ce-cli: 5:24.0.3-1~debian.12~bookworm - - docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm + - containerd.io: 1.6.33-1 + - docker-ce: 5:26.1.4-1~debian.12~bookworm + - docker-ce-cli: 5:26.1.4-1~debian.12~bookworm + - docker-ce-rootless-extras: 5:26.1.4-1~debian.12~bookworm - hold: True - update_holds: True {% elif grains.oscodename == 'jammy' %} From dbd987345075eea5a3d28f188eb6543b2d8bc30e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 16:04:11 -0400 Subject: [PATCH 694/777] upgrade docker for jammy --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 732a9d7dd..9140c1c33 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -30,10 +30,10 @@ dockerheldpackages: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.6.21-1 - - 
docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy - - docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy - - docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy + - containerd.io: 1.6.33-1 + - docker-ce: 5:26.1.4-1~ubuntu.22.04~jammy + - docker-ce-cli: 5:26.1.4-1~ubuntu.22.04~jammy + - docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.22.04~jammy - hold: True - update_holds: True {% else %} From 0b1e3b2a7f066b3bfe219ee028acda912422c06c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 16:24:44 -0400 Subject: [PATCH 695/777] upgrade docker for focal --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 9140c1c33..a02aa4a3b 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -40,10 +40,10 @@ dockerheldpackages: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.4.9-1 - - docker-ce: 5:20.10.8~3-0~ubuntu-focal - - docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal - - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal + - containerd.io: 1.6.33-1 + - docker-ce: 5:26.1.4-1~ubuntu.20.04~focal + - docker-ce-cli: 5:26.1.4-1~ubuntu.20.04~focal + - docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.20.04~focal - hold: True - update_holds: True {% endif %} From 4b481bd405ea088a9d7e6f36d6b512ad2fa4caea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Jun 2024 09:41:58 -0400 Subject: [PATCH 696/777] add epoch to docker for oracle --- salt/docker/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index a02aa4a3b..1e37364bc 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -52,8 +52,8 @@ dockerheldpackages: pkg.installed: - pkgs: - containerd.io: 1.6.33-3.1.el9 - - docker-ce: 26.1.4-1.el9 - - docker-ce-cli: 26.1.4-1.el9 + - docker-ce: 3:26.1.4-1.el9 + - docker-ce-cli: 1:26.1.4-1.el9 - docker-ce-rootless-extras: 26.1.4-1.el9 - hold: True - update_holds: True From 
08d2a6242dfa11ce9a89798d4fd8007df9d4cb9f Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 11 Jun 2024 10:03:33 -0400 Subject: [PATCH 697/777] Add new bind - suricata all.rules --- salt/soc/enabled.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 4d4b5f6fd..9b50b449b 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -27,6 +27,7 @@ so-soc: - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw + - /opt/so/rules/nids/suri:/opt/sensoroni/nids:ro - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/uploads:/nsm/soc/uploads:rw From 08557ae2875ff6fb8959bd28573394acede60a6b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 11 Jun 2024 11:01:34 -0400 Subject: [PATCH 698/777] kafka.id field should only be present when metadata for kafka exists Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index a17b3a17a..899ec0c64 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -84,7 +84,7 @@ { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, - { "set": { "field": "kafka.id", "value": 
"{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, + { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ From ca7b89c308ede9fad5429c7e6d4061590a936911 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 11 Jun 2024 11:21:13 -0400 Subject: [PATCH 699/777] Added Kafka reset to SOC UI. Incase of changing an active broker to a controller topics may become unavailable. Resolving this would require manual intervention. This option allows running a reset to start from a clean slate to then configure cluster to desired state before reenabling Kafka as global pipeline. Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 1 + salt/kafka/disabled.sls | 3 --- salt/kafka/reset_kafka.sls | 9 +++++++++ salt/kafka/soc_kafka.yaml | 7 +++++-- salt/salt/files/engines.conf | 16 ++++++++++++++++ 5 files changed, 31 insertions(+), 5 deletions(-) create mode 100644 salt/kafka/reset_kafka.sls diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index f45560e60..e029bc251 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -3,6 +3,7 @@ kafka: cluster_id: kafka_pass: kafka_controllers: + reset_kafka: config: broker: advertised_x_listeners: diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls index 0027fbfb9..4678e2602 100644 --- a/salt/kafka/disabled.sls +++ b/salt/kafka/disabled.sls @@ -3,9 +3,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
-include: - - kafka.sostatus - so-kafka: docker_container.absent: - force: True diff --git a/salt/kafka/reset_kafka.sls b/salt/kafka/reset_kafka.sls new file mode 100644 index 000000000..8789516cd --- /dev/null +++ b/salt/kafka/reset_kafka.sls @@ -0,0 +1,9 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +wipe_kafka_data: + file.absent: + - name: /nsm/kafka/data/ + - force: True \ No newline at end of file diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 05f047c4a..686b2ad97 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -1,6 +1,6 @@ kafka: enabled: - description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Join all receiver nodes to grid that will be converted to Kafka nodes, configure kafka_controllers with the hostnames of the nodes you want to act as controllers, and configure the default_replication_factor to the desired value for your redundancy needs. + description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Configure kafka_controllers with the hostnames of the nodes you want to act as controllers, join all receiver nodes to grid that will be converted to Kafka nodes, and configure the default_replication_factor to the desired value for your redundancy needs. helpLink: kafka.html cluster_id: description: The ID of the Kafka cluster. @@ -13,9 +13,12 @@ kafka: sensitive: True helpLink: kafka.html kafka_controllers: - description: A comma-seperated list of Security Onion grid members that should act as controllers for this Kafka cluster. 
By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. + description: A comma-seperated list of Security Onion hosts that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. WARNING - The hostnames of receiver nodes intended to be controllers should be added here BEFORE they have joined the Security Onion grid or BEFORE enabling KAFKA. This is to ensure that data is not lost by converting a data broker to a controller. Failure to do so may result in topics becoming unavailable and requiring manual intervention to repair or resetting Kafka data. forcedType: "string" helpLink: kafka.html + reset_kafka: + description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. 
+ helpLink: kafka.html config: broker: advertised_x_listeners: diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index de5685fff..dbfb89973 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -75,4 +75,20 @@ engines: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka - cmd.run: cmd: salt-call state.apply elasticfleet + - files: + - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls + pillar: kafka.reset_kafka + default: '' + actions: + from: + '*': + to: + 'YES_RESET_KAFKA': + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka.disabled,kafka.reset_kafka + - cmd.run: + cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset_kafka interval: 10 From a81e4c33625cbdfd0ceb3ac7ad3a001ce4f8ad96 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 11 Jun 2024 11:55:17 -0400 Subject: [PATCH 700/777] remove dash(-) from kafka.id Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 899ec0c64..233cd647b 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -84,7 +84,7 @@ { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", 
"value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, - { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, + { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ From 628893fd5b85da670d7fd4860c00dfe93ae208b5 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 11 Jun 2024 11:56:21 -0400 Subject: [PATCH 701/777] remove redundant 'kafka_' from annotations & defaults Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 6 +++--- salt/kafka/soc_kafka.yaml | 13 +++++++------ salt/manager/tools/sbin/soup | 2 +- salt/salt/files/engines.conf | 6 +++--- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index e029bc251..062c2d5ca 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -1,9 +1,9 @@ kafka: enabled: False cluster_id: - kafka_pass: - kafka_controllers: - reset_kafka: + password: + controllers: + reset: config: broker: advertised_x_listeners: diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 686b2ad97..59816cef3 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -1,6 +1,6 @@ kafka: enabled: - description: Enable or disable Kafka. 
Recommended to have desired configuration staged prior to enabling Kafka. Configure kafka_controllers with the hostnames of the nodes you want to act as controllers, join all receiver nodes to grid that will be converted to Kafka nodes, and configure the default_replication_factor to the desired value for your redundancy needs. + description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Configure controllers with the hostnames of the nodes you want to act as controllers, join all receiver nodes to grid that will be converted to Kafka nodes, and configure the default_replication_factor to the desired value for your redundancy needs. helpLink: kafka.html cluster_id: description: The ID of the Kafka cluster. @@ -8,16 +8,17 @@ kafka: advanced: True sensitive: True helpLink: kafka.html - kafka_pass: + password: description: The password to use for the Kafka certificates. sensitive: True helpLink: kafka.html - kafka_controllers: + controllers: description: A comma-seperated list of Security Onion hosts that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. WARNING - The hostnames of receiver nodes intended to be controllers should be added here BEFORE they have joined the Security Onion grid or BEFORE enabling KAFKA. This is to ensure that data is not lost by converting a data broker to a controller. Failure to do so may result in topics becoming unavailable and requiring manual intervention to repair or resetting Kafka data. forcedType: "string" helpLink: kafka.html - reset_kafka: - description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. 
This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. + reset: + description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_reset' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed. + advanced: True helpLink: kafka.html config: broker: @@ -31,7 +32,7 @@ kafka: forcedType: bool helpLink: kafka.html default_x_replication_x_factor: - description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in kafka_controllers should not be counted towards total broker count. + description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in controllers should not be counted towards total broker count. 
title: default.replication.factor forcedType: int helpLink: kafka.html diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 56d2d7de3..e38ba40e0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -644,7 +644,7 @@ up_to_2.4.80() { kafka_cluster_id=$(get_random_value 22) echo ' cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + echo ' password: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls INSTALLEDVERSION=2.4.80 } diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index dbfb89973..7b1f9bf3e 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -60,7 +60,7 @@ engines: - files: - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls - pillar: kafka.kafka_controllers + pillar: kafka.controllers default: '' actions: from: @@ -78,7 +78,7 @@ engines: - files: - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls - pillar: kafka.reset_kafka + pillar: kafka.reset default: '' actions: from: @@ -90,5 +90,5 @@ engines: - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka.disabled,kafka.reset_kafka - cmd.run: - cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset_kafka + cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset interval: 10 From d5ef0e57443edd62a801392cc7bcb52d11f12d56 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Tue, 11 Jun 2024 12:34:32 -0600 Subject: [PATCH 702/777] Fix unnecessary escaping --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml 
b/salt/soc/defaults.yaml index f5628f3c3..f0d028fdb 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2261,7 +2261,7 @@ soc: meta: description = ""; strings: - $x = \"string\"; + $x = "string"; condition: all of them; } From c38f48c7f28f8b8e669eb2ae6dcf092fbf8896b8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Jun 2024 10:34:32 -0400 Subject: [PATCH 703/777] remove this \n --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0d52e5c16..821feed73 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1295,7 +1295,7 @@ EOF # check if the FINAL_MESSAGE_QUEUE is not empty if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then - echo "The following additional information applies specifically to your grid:\n" + echo "The following additional information applies specifically to your grid:" for m in "${FINAL_MESSAGE_QUEUE[@]}"; do echo "$m" echo From b7eebad2a570d53383194014787df2dffa8bad5d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:01:40 -0400 Subject: [PATCH 704/777] Update Kafka self reset & add initial Kafka wrapper scripts to build out Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 5 +- salt/kafka/nodes.map.jinja | 2 +- salt/kafka/{reset_kafka.sls => reset.sls} | 0 salt/kafka/soc_kafka.yaml | 2 +- salt/kafka/tools/sbin/so-kafka-cli | 47 +++++++++++ salt/kafka/tools/sbin/so-kafka-config-update | 87 ++++++++++++++++++++ salt/salt/files/engines.conf | 2 +- 7 files changed, 139 insertions(+), 6 deletions(-) rename salt/kafka/{reset_kafka.sls => reset.sls} (100%) create mode 100644 salt/kafka/tools/sbin/so-kafka-cli create mode 100644 salt/kafka/tools/sbin/so-kafka-config-update diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 165daf7eb..1c3d8c26b 100644 --- a/salt/kafka/config.sls +++ 
b/salt/kafka/config.sls @@ -20,14 +20,13 @@ kafka: - uid: 960 - gid: 960 -{# Future tools to query kafka directly / show consumer groups kafka_sbin_tools: file.recurse: - name: /usr/sbin - source: salt://kafka/tools/sbin - user: 960 - group: 960 - - file_mode: 755 #} + - file_mode: 755 kafka_sbin_jinja_tools: file.recurse: @@ -69,7 +68,7 @@ kafka_kraft_{{sc}}_properties: reset_quorum_on_changes: cmd.run: - name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state - - watch: + - onchanges: - file: /opt/so/conf/kafka/server.properties {% else %} diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index c0b98de14..3a73b038f 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -16,7 +16,7 @@ tgt_type='compound') %} {% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %} -{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} +{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:controllers', default=None) %} {% set existing_ids = [] %} diff --git a/salt/kafka/reset_kafka.sls b/salt/kafka/reset.sls similarity index 100% rename from salt/kafka/reset_kafka.sls rename to salt/kafka/reset.sls diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 59816cef3..1172fc5b9 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -17,7 +17,7 @@ kafka: forcedType: "string" helpLink: kafka.html reset: - description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_reset' and submit to disable and reset Kafka. 
Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed. + description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed. advanced: True helpLink: kafka.html config: diff --git a/salt/kafka/tools/sbin/so-kafka-cli b/salt/kafka/tools/sbin/so-kafka-cli new file mode 100644 index 000000000..41993f67f --- /dev/null +++ b/salt/kafka/tools/sbin/so-kafka-cli @@ -0,0 +1,47 @@ +#! /bin/bash +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +if [ -z "$NOROOT" ]; then + # Check for prerequisites + if [ "$(id -u)" -ne 0 ]; then + echo "This script must be run using sudo!" + exit 1 + fi +fi + +function usage() { + echo -e "\nUsage: $0