From 28e8c54443d48c95f372c587e2ccceda56ff2ba1 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Thu, 15 Dec 2022 10:43:58 -0500
Subject: [PATCH] Wire telegraf initial commit

---
 salt/telegraf/defaults.yaml     |    3 +
 salt/telegraf/etc/telegraf.conf | 2127 +------------------------------
 setup/so-functions              |    5 +
 setup/so-variables              |   10 +-
 4 files changed, 23 insertions(+), 2122 deletions(-)
 create mode 100644 salt/telegraf/defaults.yaml

diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml
new file mode 100644
index 000000000..a70da5a59
--- /dev/null
+++ b/salt/telegraf/defaults.yaml
@@ -0,0 +1,3 @@
+telegraf:
+  config:
+    interval: '30s'
\ No newline at end of file

diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf
index 31a6d97e8..921a5f0e7 100644
--- a/salt/telegraf/etc/telegraf.conf
+++ b/salt/telegraf/etc/telegraf.conf
@@ -1,46 +1,22 @@
 # Telegraf Configuration
-#
-# Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs, and sent to the declared outputs.
-#
-# Plugins must be declared in here to be active.
-# To deactivate a plugin, comment out the name and any variables.
-#
-# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
-# file would generate.
-#
-# Environment variables can be used anywhere in this config file, simply prepend
-# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
-# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
-
+{% import_yaml 'telegraf/defaults.yaml' as TGDEFAULTS %}
 {%- set MANAGER = GLOBALS.manager %}
 {%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
 {%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
 {%- set NODEIP = GLOBALS.node_ip %}
-{%- set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
 {%- set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
-{%- set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
 {%- set ZEEK_ENABLED = salt['pillar.get']('zeek:enabled', True) %}
 {%- set MDENGINE = GLOBALS.md_engine %}

 # Global tags can be specified here in key="value" format.
 [global_tags]
-  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
-  # rack = "1a"
-  ## Environment variables can be used as tags, and throughout the config file
-  # user = "$USER"
-  role = "{{ GLOBALS.role.split('-') | last }}"
-
-
-{% if grains['role'] == 'so-helix' %}
-  meta_cbid = "{{ UNIQUEID }}"
-{% endif %}
+  role = "{{ GLOBALS.role.split('-') | last }}"

 # Configuration for telegraf agent
 [agent]
   ## Default data collection interval for all inputs
-  interval = "30s"
+  interval = "{{ TGDEFAULTS.telegraf.config.interval }}"
   ## Rounds collection interval to 'interval'
   ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
   round_interval = true
@@ -98,46 +74,9 @@
 ###############################################################################

 # Configuration for sending metrics to InfluxDB
-{% if grains['role'] != 'so-helix' %}
 [[outputs.influxdb]]
-  ## The full HTTP or UDP URL for your InfluxDB instance.
-  ##
-  ## Multiple URLs can be specified for a single cluster, only ONE of the
-  ## urls will be written to each interval.
-  # urls = ["unix:///var/run/influxdb.sock"]
-  # urls = ["udp://127.0.0.1:8089"]
   urls = ["https://{{ MANAGER }}:8086"]
-
-  ## The target database for metrics; will be created as needed.
-  # database = "telegraf"
-
-  ## If true, no CREATE DATABASE queries will be sent.
Set to true when using - ## Telegraf with a user without permissions to create databases or when the - ## database already exists. - # skip_database_creation = false - - ## Name of existing retention policy to write to. Empty string writes to - ## the default retention policy. Only takes effect when using HTTP. - # retention_policy = "" - - ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". - ## Only takes effect when using HTTP. - # write_consistency = "any" - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## HTTP Basic Auth - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## UDP payload size is the maximum packet size to send. - # udp_payload = "512B" - ## Optional TLS Config for use on HTTP connections. tls_ca = "/etc/telegraf/ca.crt" tls_cert = "/etc/telegraf/telegraf.crt" @@ -145,347 +84,14 @@ ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" - - ## When true, Telegraf will output unsigned integers as unsigned values, - ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned - ## integer values. Enabling this option will result in field type errors if - ## existing data has been written. - # influx_uint_support = false -{% else %} -# A plugin that can transmit metrics over HTTP -[[outputs.http]] - ## URL is the address to send metrics to - url = "https://helix-integrations.cloud.aws.apps.fireeye.com/api/upload" - - ## Timeout for HTTP message - # timeout = "5s" - - ## HTTP method, one of: "POST" or "PUT" - method = "POST" - - ## HTTP Basic Auth credentials - # username = "username" - # password = "pa$$word" - - ## OAuth2 Client Credentials Grant - # client_id = "clientid" - # client_secret = "secret" - # token_url = "https://indentityprovider/oauth2/v1/token" - # scopes = ["urn:opc:idm:__myscopes__"] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Data format to output. - ## Each data format has it's own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "json" - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. 
- content_encoding = "gzip" - - ## Additional HTTP headers - [outputs.http.headers] - # # Should be set manually to "application/json" for json data_format - Content-Type = "application/json; charset=utf-8" - Authorization = "{{ HELIX_API_KEY }}" - -{% endif %} ############################################################################### # PROCESSOR PLUGINS # ############################################################################### -# # Convert values to another metric value type -# [[processors.converter]] -# ## Tags to convert -# ## -# ## The table key determines the target type, and the array of key-values -# ## select the keys to convert. The array may contain globs. -# ## = [...] -# [processors.converter.tags] -# string = [] -# integer = [] -# unsigned = [] -# boolean = [] -# float = [] -# -# ## Fields to convert -# ## -# ## The table key determines the target type, and the array of key-values -# ## select the keys to convert. The array may contain globs. -# ## = [...] -# [processors.converter.fields] -# tag = [] -# string = [] -# integer = [] -# unsigned = [] -# boolean = [] -# float = [] - - -# # Map enum values according to given table. -# [[processors.enum]] -# [[processors.enum.mapping]] -# ## Name of the field to map -# field = "status" -# -# ## Destination field to be used for the mapped value. By default the source -# ## field is used, overwriting the original value. -# # dest = "status_code" -# -# ## Default value to be used for all values not contained in the mapping -# ## table. When unset, the unmodified value for the field will be used if no -# ## match is found. -# # default = 0 -# -# ## Table of mappings -# [processors.enum.mapping.value_mappings] -# green = 1 -# yellow = 2 -# red = 3 - - -# # Apply metric modifications using override semantics. -# [[processors.override]] -# ## All modifications on inputs and aggregators can be overridden: -# # name_override = "new_name" -# # name_prefix = "new_name_prefix" -# # name_suffix = "new_name_suffix" -# -# ## Tags to be added (all values must be strings) -# # [processors.override.tags] -# # additional_tag = "tag_value" - - -# # Parse a value in a specified field/tag(s) and add the result in a new metric -# [[processors.parser]] -# ## The name of the fields whose value will be parsed. -# parse_fields = [] -# -# ## If true, incoming metrics are not emitted. -# drop_original = false -# -# ## If set to override, emitted metrics will be merged by overriding the -# ## original metric using the newly parsed metrics. -# merge = "override" -# -# ## The dataformat to be read from files -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Print all metrics that pass through this filter. 
-# [[processors.printer]] - - -# # Transforms tag and field values with regex pattern -# [[processors.regex]] -# ## Tag and field conversions defined in a separate sub-tables -# # [[processors.regex.tags]] -# # ## Tag to change -# # key = "resp_code" -# # ## Regular expression to match on a tag value -# # pattern = "^(\\d)\\d\\d$" -# # ## Pattern for constructing a new value (${1} represents first subgroup) -# # replacement = "${1}xx" -# -# # [[processors.regex.fields]] -# # key = "request" -# # ## All the power of the Go regular expressions available here -# # ## For example, named subgroups -# # pattern = "^/api(?P/[\\w/]+)\\S*" -# # replacement = "${method}" -# # ## If result_key is present, a new field will be created -# # ## instead of changing existing field -# # result_key = "method" -# -# ## Multiple conversions may be applied for one field sequentially -# ## Let's extract one more value -# # [[processors.regex.fields]] -# # key = "request" -# # pattern = ".*category=(\\w+).*" -# # replacement = "${1}" -# # result_key = "search_category" - - -# # Rename measurements, tags, and fields that pass through this filter. -# [[processors.rename]] - - -# # Perform string processing on tags, fields, and measurements -# [[processors.strings]] -# ## Convert a tag value to uppercase -# # [[processors.strings.uppercase]] -# # tag = "method" -# -# ## Convert a field value to lowercase and store in a new field -# # [[processors.strings.lowercase]] -# # field = "uri_stem" -# # dest = "uri_stem_normalised" -# -# ## Trim leading and trailing whitespace using the default cutset -# # [[processors.strings.trim]] -# # field = "message" -# -# ## Trim leading characters in cutset -# # [[processors.strings.trim_left]] -# # field = "message" -# # cutset = "\t" -# -# ## Trim trailing characters in cutset -# # [[processors.strings.trim_right]] -# # field = "message" -# # cutset = "\r\n" -# -# ## Trim the given prefix from the field -# # [[processors.strings.trim_prefix]] -# # field = "my_value" -# # prefix = "my_" -# -# ## Trim the given suffix from the field -# # [[processors.strings.trim_suffix]] -# # field = "read_count" -# # suffix = "_count" -# -# ## Replace substrings within field names -# # [[processors.strings.trim_suffix]] -# # measurement = "*" -# # old = ":" -# # new = "_" - - -# # Print all metrics that pass through this filter. -# [[processors.topk]] -# ## How many seconds between aggregations -# # period = 10 -# -# ## How many top metrics to return -# # k = 10 -# -# ## Over which tags should the aggregation be done. Globs can be specified, in -# ## which case any tag matching the glob will aggregated over. If set to an -# ## empty list is no aggregation over tags is done -# # group_by = ['*'] -# -# ## Over which fields are the top k are calculated -# # fields = ["value"] -# -# ## What aggregation to use. Options: sum, mean, min, max -# # aggregation = "mean" -# -# ## Instead of the top k largest metrics, return the bottom k lowest metrics -# # bottomk = false -# -# ## The plugin assigns each metric a GroupBy tag generated from its name and -# ## tags. If this setting is different than "" the plugin will add a -# ## tag (which name will be the value of this setting) to each metric with -# ## the value of the calculated GroupBy tag. Useful for debugging -# # add_groupby_tag = "" -# -# ## These settings provide a way to know the position of each metric in -# ## the top k. The 'add_rank_field' setting allows to specify for which -# ## fields the position is required. 
If the list is non empty, then a field -# ## will be added to each and every metric for each string present in this -# ## setting. This field will contain the ranking of the group that -# ## the metric belonged to when aggregated over that field. -# ## The name of the field will be set to the name of the aggregation field, -# ## suffixed with the string '_topk_rank' -# # add_rank_fields = [] -# -# ## These settings provide a way to know what values the plugin is generating -# ## when aggregating metrics. The 'add_agregate_field' setting allows to -# ## specify for which fields the final aggregation value is required. If the -# ## list is non empty, then a field will be added to each every metric for -# ## each field present in this setting. This field will contain -# ## the computed aggregation for the group that the metric belonged to when -# ## aggregated over that field. -# ## The name of the field will be set to the name of the aggregation field, -# ## suffixed with the string '_topk_aggregate' -# # add_aggregate_fields = [] - - - ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### -# # Keep the aggregate basicstats of each metric passing through. -# [[aggregators.basicstats]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false - - -# # Create aggregate histograms. -# [[aggregators.histogram]] -# ## The period in which to flush the aggregator. -# period = "30s" -# -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# -# ## Example config that aggregates all fields of the metric. -# # [[aggregators.histogram.config]] -# # ## The set of buckets. -# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] -# # ## The name of metric. -# # measurement_name = "cpu" -# -# ## Example config that aggregates only specific fields of the metric. -# # [[aggregators.histogram.config]] -# # ## The set of buckets. -# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] -# # ## The name of metric. -# # measurement_name = "diskio" -# # ## The concrete fields of metric -# # fields = ["io_time", "read_time", "write_time"] - - -# # Keep the aggregate min/max of each metric passing through. -# [[aggregators.minmax]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false - - -# # Count the occurrence of values in fields. -# [[aggregators.valuecounter]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. 
-# drop_original = false -# ## The fields for which the values will be counted -# fields = [] - - - ############################################################################### # INPUT PLUGINS # ############################################################################### @@ -543,27 +149,22 @@ [[inputs.kernel]] # no configuration - # Read metrics about memory usage [[inputs.mem]] # no configuration - # Get the number of processes and group them by status [[inputs.processes]] # no configuration - # Read metrics about swap memory usage [[inputs.swap]] # no configuration - # Read metrics about system load & uptime [[inputs.system]] # no configuration - # # Collect bond interface status, slaves statuses and failures count [[inputs.bond]] # ## Sets 'proc' directory path @@ -575,7 +176,6 @@ # ## bond interfaces. # # bond_interfaces = ["bond0"] - # # Read metrics about docker containers [[inputs.docker]] # ## Docker Endpoint @@ -583,58 +183,19 @@ # ## To use environment variables (ie, docker-machine), set endpoint = "ENV" endpoint = "unix:///var/run/docker.sock" # -# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) -# gather_services = false -# -# ## Only collect metrics for these containers, collect all if empty -# container_names = [] -# -# ## Containers to include and exclude. Globs accepted. -# ## Note that an empty array for both will include all containers -# container_name_include = [] -# container_name_exclude = [] -# -# ## Container states to include and exclude. Globs accepted. -# ## When empty only containers in the "running" state will be captured. -# # container_state_include = [] -# # container_state_exclude = [] -# -# ## Timeout for docker list, info, and stats commands -# timeout = "5s" -# -# ## Whether to report for each container per-device blkio (8:0, 8:1...) and -# ## network (eth0, eth1, ...) stats or not -# perdevice = true -# ## Whether to report for each container total blkio and network stats or not -# total = false -# ## Which environment variables should we use as a tag -# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] -# -# ## docker labels to include and exclude as tags. Globs accepted. 
-# ## Note that an empty array for both will include all labels as tags -# docker_label_include = [] -# docker_label_exclude = [] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - # # Read stats from one or more Elasticsearch servers or clusters -{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} +{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-heavynode'] %} [[inputs.elasticsearch]] - servers = ["https://{{ MANAGER }}:9200"] + servers = ["https://{{ NODEIP }}:9200"] cluster_stats = true username = "{{ ES_USER }}" password = "{{ ES_PASS }}" insecure_skip_verify = true -{%- elif grains['role'] in ['so-searchnode', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %} +{%- elif grains['role'] in ['so-searchnode', 'so-hotnode', 'so-warmnode'] %} [[inputs.elasticsearch]] servers = ["https://{{ NODEIP }}:9200"] - cluster_stats = true + cluster_stats = false username = "{{ ES_USER }}" password = "{{ ES_PASS }}" insecure_skip_verify = true @@ -811,53 +372,8 @@ ] data_format = "influx" timeout = "15s" -{% elif grains['role'] == 'so-helix' %} -[[inputs.exec]] - commands = [ - "/scripts/stenoloss.sh", - "/scripts/suriloss.sh", - "/scripts/checkfiles.sh", - {%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %} - "/scripts/zeekloss.sh", - "/scripts/zeekcaptureloss.sh", - {%- endif %} - "/scripts/oldpcap.sh", - "/scripts/helixeps.sh" - ] - data_format = "influx" - timeout = "15s" {% endif %} - ## measurement name suffix (for separating different commands) - # name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - - - -# # Read metrics from fail2ban. -# [[inputs.fail2ban]] -# ## Use sudo to run fail2ban-client -# use_sudo = false - -# # Reload and gather from file[s] on telegraf's interval. -# [[inputs.file]] -# ## Files to parse each interval. -# ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## /var/log/**.log -> recursively find all .log files in /var/log -# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log -# ## /var/log/apache.log -> only read the apache log file -# files = ["/var/log/apache/access.log"] -# -# ## The dataformat to be read from files -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" {%- if salt['pillar.get']('healthcheck:enabled', False) %} [[inputs.file]] files = ["/host/nsm/zeek/logs/zeek_restart.log"] @@ -871,1634 +387,5 @@ json_string_fields = ['manint', 'monint'] tag_keys = ['role'] -# # Count files in a directory -# [[inputs.filecount]] -# ## Directory to gather stats about. -# ## deprecated in 1.9; use the directories option -# directory = "/var/cache/apt/archives" -# -# ## Directories to gather stats about. -# ## This accept standard unit glob matching rules, but with the addition of -# ## ** as a "super asterisk". 
ie: -# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories -# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories -# ## /var/log -> count all files in /var/log and all of its subdirectories -# directories = ["/var/cache/apt/archives"] -# -# ## Only count files that match the name pattern. Defaults to "*". -# name = "*.deb" -# -# ## Count files in subdirectories. Defaults to true. -# recursive = false -# -# ## Only count regular files. Defaults to true. -# regular_only = true -# -# ## Only count files that are at least this size. If size is -# ## a negative number, only count files that are smaller than the -# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... -# ## Without quotes and units, interpreted as size in bytes. -# size = "0B" -# -# ## Only count files that have not been touched for at least this -# ## duration. If mtime is negative, only count files that have been -# ## touched in this duration. Defaults to "0s". -# mtime = "0s" - - -# # Read stats about given file(s) -# [[inputs.filestat]] -# ## Files to gather stats about. -# ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## "/var/log/**.log" -> recursively find all .log files in /var/log -# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log -# ## "/var/log/apache.log" -> just tail the apache log file -# ## -# ## See https://github.com/gobwas/glob for more examples -# ## -# files = ["/var/log/**.log"] -# ## If true, read the entire file and calculate an md5 checksum. -# md5 = false - - -# # Read metrics exposed by fluentd in_monitor plugin -# [[inputs.fluentd]] -# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). -# ## -# ## Endpoint: -# ## - only one URI is allowed -# ## - https is not supported -# endpoint = "http://localhost:24220/api/plugins.json" -# -# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) -# exclude = [ -# "monitor_agent", -# "dummy", -# ] - -# # Monitor disks' temperatures using hddtemp -# [[inputs.hddtemp]] -# ## By default, telegraf gathers temps data from all disks detected by the -# ## hddtemp. -# ## -# ## Only collect temps from the selected disks. -# ## -# ## A * as the device name will return the temperature values of all disks. -# ## -# # address = "127.0.0.1:7634" -# # devices = ["sda", "*"] - -# # Collect statistics about itself -# [[inputs.internal]] -# ## If true, collect telegraf memory stats. -# # collect_memstats = true - - -# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. -# [[inputs.interrupts]] -# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. -# # [inputs.interrupts.tagdrop] -# # irq = [ "NET_RX", "TASKLET" ] - - -# # Read metrics from the bare metal servers via IPMI -# [[inputs.ipmi_sensor]] -# ## optionally specify the path to the ipmitool executable -# # path = "/usr/bin/ipmitool" -# ## -# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR -# # privilege = "ADMINISTRATOR" -# ## -# ## optionally specify one or more servers via a url matching -# ## [username[:password]@][protocol[(address)]] -# ## e.g. 
-# ## root:passwd@lan(127.0.0.1) -# ## -# ## if no servers are specified, local machine sensor stats will be queried -# ## -# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] -# -# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid -# ## gaps or overlap in pulled data -# interval = "30s" -# -# ## Timeout for the ipmitool command to complete -# timeout = "20s" -# -# ## Schema Version: (Optional, defaults to version 1) -# metric_version = 2 - - -# # Gather packets and bytes counters from Linux ipsets -# [[inputs.ipset]] -# ## By default, we only show sets which have already matched at least 1 packet. -# ## set include_unmatched_sets = true to gather them all. -# include_unmatched_sets = false -# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") -# use_sudo = false -# ## The default timeout of 1s for ipset execution can be overridden here: -# # timeout = "1s" - - -# # Gather packets and bytes throughput from iptables -# [[inputs.iptables]] -# ## iptables require root access on most systems. -# ## Setting 'use_sudo' to true will make use of sudo to run iptables. -# ## Users must configure sudo to allow telegraf user to run iptables with no password. -# ## iptables can be restricted to only list command "iptables -nvL". -# use_sudo = false -# ## Setting 'use_lock' to true runs iptables with the "-w" option. -# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") -# use_lock = false -# ## Define an alternate executable, such as "ip6tables". Default is "iptables". -# # binary = "ip6tables" -# ## defines the table to monitor: -# table = "filter" -# ## defines the chains to monitor. -# ## NOTE: iptables rules without a comment will not be monitored. -# ## Read the plugin documentation for more information. -# chains = [ "INPUT" ] - - -# # Collect virtual and real server stats from Linux IPVS -# [[inputs.ipvs]] -# # no configuration - - -# # Get kernel statistics from /proc/vmstat -# [[inputs.kernel_vmstat]] -# # no configuration - - -# # Read status information from one or more Kibana servers -# [[inputs.kibana]] -# ## specify a list of one or more Kibana servers -# servers = ["http://localhost:5601"] -# -# ## Timeout for HTTP requests -# timeout = "5s" -# -# ## HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Provides Linux sysctl fs metrics -# [[inputs.linux_sysctl_fs]] -# # no configuration - - -# # Read metrics from one or many mysql servers -# [[inputs.mysql]] -# ## specify servers via a url matching: -# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] -# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name -# ## e.g. -# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] -# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] -# # -# ## If no servers are specified, then localhost is used as the host. -# servers = ["tcp(127.0.0.1:3306)/"] -# -# ## Selects the metric output format. -# ## -# ## This option exists to maintain backwards compatibility, if you have -# ## existing metrics do not set or change this value until you are ready to -# ## migrate to the new format. -# ## -# ## If you do not have existing metrics from this plugin set to the latest -# ## version. 
-# ## -# ## Telegraf >=1.6: metric_version = 2 -# ## <1.6: metric_version = 1 (or unset) -# metric_version = 2 -# -# ## the limits for metrics form perf_events_statements -# perf_events_statements_digest_text_limit = 120 -# perf_events_statements_limit = 250 -# perf_events_statements_time_limit = 86400 -# # -# ## if the list is empty, then metrics are gathered from all databasee tables -# table_schema_databases = [] -# # -# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list -# gather_table_schema = false -# # -# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST -# gather_process_list = true -# # -# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS -# gather_user_statistics = true -# # -# ## gather auto_increment columns and max values from information schema -# gather_info_schema_auto_inc = true -# # -# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS -# gather_innodb_metrics = true -# # -# ## gather metrics from SHOW SLAVE STATUS command output -# gather_slave_status = true -# # -# ## gather metrics from SHOW BINARY LOGS command output -# gather_binary_logs = false -# # -# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE -# gather_table_io_waits = false -# # -# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS -# gather_table_lock_waits = false -# # -# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE -# gather_index_io_waits = false -# # -# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS -# gather_event_waits = false -# # -# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME -# gather_file_events_stats = false -# # -# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST -# gather_perf_events_statements = false -# # -# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) -# interval_slow = "30m" -# -# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Read metrics about network interface usage [[inputs.net]] -# ## By default, telegraf gathers stats from any up interface (excluding loopback) -# ## Setting interfaces will tell it to gather these explicit interfaces, -# ## regardless of status. -# ## -# # interfaces = ["eth0"] -# ## -# ## On linux systems telegraf also collects protocol stats. -# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. -# ## -# # ignore_protocol_stats = false -# ## - - -# # Collect response time of a TCP or UDP connection -# [[inputs.net_response]] -# ## Protocol, must be "tcp" or "udp" -# ## NOTE: because the "udp" protocol does not respond to requests, it requires -# ## a send/expect string pair (see below). -# protocol = "tcp" -# ## Server address (default localhost) -# address = "localhost:80" -# -# ## Set timeout -# # timeout = "1s" -# -# ## Set read timeout (only used if expecting a response) -# # read_timeout = "1s" -# -# ## The following options are required for UDP checks. For TCP, they are -# ## optional. The plugin will send the given string to the server and then -# ## expect to receive the given 'expect' string back. 
-# ## string sent to the server -# # send = "ssh" -# ## expected string in answer -# # expect = "ssh" -# -# ## Uncomment to remove deprecated fields -# # fieldexclude = ["result_type", "string_found"] - - -# # Read TCP metrics such as established, time wait and sockets counts. -# [[inputs.netstat]] -# # no configuration - - -# # Read Nginx's basic status information (ngx_http_stub_status_module) -# [[inputs.nginx]] -# # An array of Nginx stub_status URI to gather stats. -# urls = ["http://localhost/server_status"] -# -# ## Optional TLS Config -# tls_ca = "/etc/telegraf/ca.pem" -# tls_cert = "/etc/telegraf/cert.cer" -# tls_key = "/etc/telegraf/key.key" -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = false -# -# # HTTP response timeout (default: 5s) -# response_timeout = "5s" - - -# # Read Nginx Plus' full status information (ngx_http_status_module) -# [[inputs.nginx_plus]] -# ## An array of ngx_http_status_module or status URI to gather stats. -# urls = ["http://localhost/status"] -# -# # HTTP response timeout (default: 5s) -# response_timeout = "5s" - - -# # Read Nginx Plus Api documentation -# [[inputs.nginx_plus_api]] -# ## An array of API URI to gather stats. -# urls = ["http://localhost/api"] -# -# # Nginx API version, default: 3 -# # api_version = 3 -# -# # HTTP response timeout (default: 5s) -# response_timeout = "5s" - - -# # Read Nginx virtual host traffic status module information (nginx-module-vts) -# [[inputs.nginx_vts]] -# ## An array of ngx_http_status_module or status URI to gather stats. -# urls = ["http://localhost/status"] -# -# ## HTTP response timeout (default: 5s) -# response_timeout = "5s" - - -# # Collect kernel snmp counters and network interface statistics -# [[inputs.nstat]] -# ## file paths for proc files. If empty default paths will be used: -# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 -# ## These can also be overridden with env variables, see README. -# proc_net_netstat = "/proc/net/netstat" -# proc_net_snmp = "/proc/net/snmp" -# proc_net_snmp6 = "/proc/net/snmp6" -# ## dump metrics with 0 values too -# dump_zeros = true - - -# # Get standard NTP query metrics, requires ntpq executable. -# [[inputs.ntpq]] -# ## If false, set the -n ntpq flag. Can reduce metric gather time. -# dns_lookup = true - - -# # Gather counters from PF -# [[inputs.pf]] -# ## PF require root access on most systems. -# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. -# ## Users must configure sudo to allow telegraf user to run pfctl with no password. -# ## pfctl can be restricted to only list command "pfctl -s info". -# use_sudo = false - - - -# # Ping given url(s) and return statistics -# [[inputs.ping]] -# ## List of urls to ping -# urls = ["example.org"] -# -# ## Number of pings to send per collection (ping -c ) -# # count = 1 -# -# ## Interval, in s, at which to ping. 0 == default (ping -i ) -# ## Not available in Windows. -# # ping_interval = 1.0 -# -# ## Per-ping timeout, in s. 0 == no timeout (ping -W ) -# # timeout = 1.0 -# -# ## Total-ping deadline, in s. 
0 == no deadline (ping -w ) -# # deadline = 10 -# -# ## Interface or source address to send ping from (ping -I ) -# ## on Darwin and Freebsd only source address possible: (ping -S ) -# # interface = "" -# -# ## Specify the ping executable binary, default is "ping" -# # binary = "ping" -# -# ## Arguments for ping command -# ## when arguments is not empty, other options (ping_interval, timeout, etc) will be ignored -# # arguments = ["-c", "3"] - - - -# # Monitor process cpu and memory usage -# [[inputs.procstat]] -# ## PID file to monitor process -# pid_file = "/var/run/nginx.pid" -# ## executable name (ie, pgrep ) -# # exe = "nginx" -# ## pattern as argument for pgrep (ie, pgrep -f ) -# # pattern = "nginx" -# ## user as argument for pgrep (ie, pgrep -u ) -# # user = "nginx" -# ## Systemd unit name -# # systemd_unit = "nginx.service" -# ## CGroup name or path -# # cgroup = "systemd/system.slice/nginx.service" -# -# ## Windows service name -# # win_service = "" -# -# ## override for process_name -# ## This is optional; default is sourced from /proc//status -# # process_name = "bar" -# -# ## Field name prefix -# # prefix = "" -# -# ## Add PID as a tag instead of a field; useful to differentiate between -# ## processes whose tags are otherwise the same. Can create a large number -# ## of series, use judiciously. -# # pid_tag = false -# -# ## Method to use when finding process IDs. Can be one of 'pgrep', or -# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while -# ## the native finder performs the search directly in a manor dependent on the -# ## platform. Default is 'pgrep' -# # pid_finder = "pgrep" - - -# # Reads last_run_summary.yaml file and converts to measurments -# [[inputs.puppetagent]] -# ## Location of puppet last run summary file -# location = "/var/lib/puppet/state/last_run_summary.yaml" - - -# # Read metrics from one or many redis servers -# [[inputs.redis]] -# ## specify servers via a url matching: -# ## [protocol://][:password]@address[:port] -# ## e.g. -# ## tcp://localhost:6379 -# ## tcp://:password@192.168.99.100 -# ## unix:///var/run/redis.sock -# ## -# ## If no servers are specified, then localhost is used as the host. -# ## If no port is specified, 6379 is used -# servers = ["tcp://localhost:6379"] -# -# ## specify server password -# # password = "s#cr@t%" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true - - -# # Monitor sensors, requires lm-sensors package -# [[inputs.sensors]] -# ## Remove numbers from field names. -# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. -# # remove_numbers = true -# -# ## Timeout is the maximum amount of time that the sensors command can run. -# # timeout = "5s" - - -# # Read metrics from storage devices supporting S.M.A.R.T. -# [[inputs.smart]] -# ## Optionally specify the path to the smartctl executable -# # path = "/usr/bin/smartctl" -# # -# ## On most platforms smartctl requires root access. -# ## Setting 'use_sudo' to true will make use of sudo to run smartctl. -# ## Sudo must be configured to to allow the telegraf user to run smartctl -# ## with out password. -# # use_sudo = false -# # -# ## Skip checking disks in this power mode. Defaults to -# ## "standby" to not wake up disks that have stoped rotating. -# ## See --nocheck in the man pages for smartctl. 
-# ## smartctl version 5.41 and 5.42 have faulty detection of -# ## power mode and might require changing this value to -# ## "never" depending on your disks. -# # nocheck = "standby" -# # -# ## Gather detailed metrics for each SMART Attribute. -# ## Defaults to "false" -# ## -# # attributes = false -# # -# ## Optionally specify devices to exclude from reporting. -# # excludes = [ "/dev/pass6" ] -# # -# ## Optionally specify devices and device type, if unset -# ## a scan (smartctl --scan) for S.M.A.R.T. devices will -# ## done and all found will be included except for the -# ## excluded in excludes. -# # devices = [ "/dev/ada0 -d atacam" ] - - -# # Sysstat metrics collector -# [[inputs.sysstat]] -# ## Path to the sadc command. -# # -# ## Common Defaults: -# ## Debian/Ubuntu: /usr/lib/sysstat/sadc -# ## Arch: /usr/lib/sa/sadc -# ## RHEL/CentOS: /usr/lib64/sa/sadc -# sadc_path = "/usr/lib/sa/sadc" # required -# # -# # -# ## Path to the sadf command, if it is not in PATH -# # sadf_path = "/usr/bin/sadf" -# # -# # -# ## Activities is a list of activities, that are passed as argument to the -# ## sadc collector utility (e.g: DISK, SNMP etc...) -# ## The more activities that are added, the more data is collected. -# # activities = ["DISK"] -# # -# # -# ## Group metrics to measurements. -# ## -# ## If group is false each metric will be prefixed with a description -# ## and represents itself a measurement. -# ## -# ## If Group is true, corresponding metrics are grouped to a single measurement. -# # group = true -# # -# # -# ## Options for the sadf command. The values on the left represent the sadf -# ## options and the values on the right their description (which are used for -# ## grouping and prefixing metrics). -# ## -# ## Run 'sar -h' or 'man sar' to find out the supported options for your -# ## sysstat version. -# [inputs.sysstat.options] -# -C = "cpu" -# -B = "paging" -# -b = "io" -# -d = "disk" # requires DISK activity -# "-n ALL" = "network" -# "-P ALL" = "per_cpu" -# -q = "queue" -# -R = "mem" -# -r = "mem_util" -# -S = "swap_util" -# -u = "cpu_util" -# -v = "inode" -# -W = "swap" -# -w = "task" -# # -H = "hugepages" # only available for newer linux distributions -# # "-I ALL" = "interrupts" # requires INT activity -# # -# # -# ## Device tags can be used to add additional tags for devices. -# ## For example the configuration below adds a tag vg with value rootvg for -# ## all metrics with sda devices. -# # [[inputs.sysstat.device_tags.sda]] -# # vg = "rootvg" - -# # Read metrics about temperature -# [[inputs.temp]] -# # no configuration - - - - -# # Reads metrics from a SSL certificate -# [[inputs.x509_cert]] -# ## List certificate sources -# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] -# -# ## Timeout for SSL connection -# # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - - -############################################################################### -# SERVICE INPUT PLUGINS # -############################################################################### - -# # AMQP consumer plugin -# [[inputs.amqp_consumer]] -# ## Broker to consume from. -# ## deprecated in 1.7; use the brokers option -# # url = "amqp://localhost:5672/influxdb" -# -# ## Brokers to consume from. 
If multiple brokers are specified a random broker -# ## will be selected anytime a connection is established. This can be -# ## helpful for load balancing when not using a dedicated load balancer. -# brokers = ["amqp://localhost:5672/influxdb"] -# -# ## Authentication credentials for the PLAIN auth_method. -# # username = "" -# # password = "" -# -# ## Exchange to declare and consume from. -# exchange = "telegraf" -# -# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". -# # exchange_type = "topic" -# -# ## If true, exchange will be passively declared. -# # exchange_passive = false -# -# ## Exchange durability can be either "transient" or "durable". -# # exchange_durability = "durable" -# -# ## Additional exchange arguments. -# # exchange_arguments = { } -# # exchange_arguments = {"hash_propery" = "timestamp"} -# -# ## AMQP queue name. -# queue = "telegraf" -# -# ## AMQP queue durability can be "transient" or "durable". -# queue_durability = "durable" -# -# ## Binding Key. -# binding_key = "#" -# -# ## Maximum number of messages server should give to the worker. -# # prefetch_count = 50 -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# -# ## Auth method. PLAIN and EXTERNAL are supported -# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as -# ## described here: https://www.rabbitmq.com/plugins.html -# # auth_method = "PLAIN" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Read Cassandra metrics through Jolokia -# [[inputs.cassandra]] -# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the -# ## jolokia2 plugin instead. -# ## -# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 -# -# context = "/jolokia/read" -# ## List of cassandra servers exposing jolokia read service -# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] -# ## List of metrics collected on above servers -# ## Each metric consists of a jmx path. -# ## This will collect all heap memory usage metrics from the jvm and -# ## ReadLatency metrics for all keyspaces and tables. -# ## "type=Table" in the query works with Cassandra3.0. 
Older versions might -# ## need to use "type=ColumnFamily" -# metrics = [ -# "/java.lang:type=Memory/HeapMemoryUsage", -# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" -# ] - - -# # Influx HTTP write listener -# [[inputs.http_listener]] -# ## Address and port to host HTTP listener on -# service_address = ":8186" -# -# ## maximum duration before timing out read of the request -# read_timeout = "10s" -# ## maximum duration before timing out write of the response -# write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# max_body_size = "500MiB" -# -# ## Maximum line size allowed to be sent in bytes. -# ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = "64KiB" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" - - -# # Generic HTTP write listener -# [[inputs.http_listener_v2]] -# ## Address and port to host HTTP listener on -# service_address = ":8080" -# -# ## Path to listen to. -# # path = "/telegraf" -# -# ## HTTP methods to accept. -# # methods = ["POST", "PUT"] -# -# ## maximum duration before timing out read of the request -# # read_timeout = "10s" -# ## maximum duration before timing out write of the response -# # write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) -# # max_body_size = "500MB" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Influx HTTP write listener -# [[inputs.influxdb_listener]] -# ## Address and port to host HTTP listener on -# service_address = ":8186" -# -# ## maximum duration before timing out read of the request -# read_timeout = "10s" -# ## maximum duration before timing out write of the response -# write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# max_body_size = "500MiB" -# -# ## Maximum line size allowed to be sent in bytes. 
-# ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = "64KiB" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" - - -# # Read JTI OpenConfig Telemetry from listed sensors -# [[inputs.jti_openconfig_telemetry]] -# ## List of device addresses to collect telemetry from -# servers = ["localhost:1883"] -# -# ## Authentication details. Username and password are must if device expects -# ## authentication. Client ID must be unique when connecting from multiple instances -# ## of telegraf to the same device -# username = "user" -# password = "pass" -# client_id = "telegraf" -# -# ## Frequency to get data -# sample_frequency = "1000ms" -# -# ## Sensors to subscribe for -# ## A identifier for each sensor can be provided in path by separating with space -# ## Else sensor path will be used as identifier -# ## When identifier is used, we can provide a list of space separated sensors. -# ## A single subscription will be created with all these sensors and data will -# ## be saved to measurement with this identifier name -# sensors = [ -# "/interfaces/", -# "collection /components/ /lldp", -# ] -# -# ## We allow specifying sensor group level reporting rate. To do this, specify the -# ## reporting rate in Duration at the beginning of sensor paths / collection -# ## name. For entries without reporting rate, we use configured sample frequency -# sensors = [ -# "1000ms customReporting /interfaces /lldp", -# "2000ms collection /components", -# "/interfaces", -# ] -# -# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure -# ## channel will be opened with server -# ssl_cert = "/etc/telegraf/cert.pem" -# -# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. -# ## Failed streams/calls will not be retried if 0 is provided -# retry_delay = "1000ms" -# -# ## To treat all string values as tags, set this to true -# str_as_tags = false - - -# # Read metrics from Kafka topic(s) -# [[inputs.kafka_consumer]] -# ## kafka servers -# brokers = ["localhost:9092"] -# ## topic(s) to consume -# topics = ["telegraf"] -# -# ## Optional Client id -# # client_id = "Telegraf" -# -# ## Set the minimal supported Kafka version. Setting this enables the use of new -# ## Kafka features and APIs. Of particular interest, lz4 compression -# ## requires at least version 0.10.0.0. 
-# ## ex: version = "1.1.0" -# # version = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional SASL Config -# # sasl_username = "kafka" -# # sasl_password = "secret" -# -# ## the name of the consumer group -# consumer_group = "telegraf_metrics_consumers" -# ## Offset (must be either "oldest" or "newest") -# offset = "oldest" -# ## Maximum length of a message to consume, in bytes (default 0/unlimited); -# ## larger messages are dropped -# max_message_len = 1000000 -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Read metrics from Kafka topic(s) -# [[inputs.kafka_consumer_legacy]] -# ## topic(s) to consume -# topics = ["telegraf"] -# ## an array of Zookeeper connection strings -# zookeeper_peers = ["localhost:2181"] -# ## Zookeeper Chroot -# zookeeper_chroot = "" -# ## the name of the consumer group -# consumer_group = "telegraf_metrics_consumers" -# ## Offset (must be either "oldest" or "newest") -# offset = "oldest" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" -# -# ## Maximum length of a message to consume, in bytes (default 0/unlimited); -# ## larger messages are dropped -# max_message_len = 65536 - - -# # Stream and parse log file(s). -# [[inputs.logparser]] -# ## Log files to parse. -# ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## /var/log/**.log -> recursively find all .log files in /var/log -# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log -# ## /var/log/apache.log -> only tail the apache log file -# files = ["/var/log/apache/access.log"] -# -# ## Read files that currently exist from the beginning. Files that are created -# ## while telegraf is running (and that match the "files" globs) will always -# ## be read from the beginning. -# from_beginning = false -# -# ## Method used to watch for file updates. Can be either "inotify" or "poll". -# # watch_method = "inotify" -# -# ## Parse logstash-style "grok" patterns: -# [inputs.logparser.grok] -# ## This is a list of patterns to check the given log file(s) for. -# ## Note that adding patterns here increases processing time. The most -# ## efficient configuration is to have one pattern per logparser. 
-# ## Other common built-in patterns are: -# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) -# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) -# patterns = ["%{COMBINED_LOG_FORMAT}"] -# -# ## Name of the outputted measurement name. -# measurement = "apache_access_log" -# -# ## Full path(s) to custom pattern files. -# custom_pattern_files = [] -# -# ## Custom patterns can also be defined here. Put one pattern per line. -# custom_patterns = ''' -# ''' -# -# ## Timezone allows you to provide an override for timestamps that -# ## don't already include an offset -# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs -# ## -# ## Default: "" which renders UTC -# ## Options are as follows: -# ## 1. Local -- interpret based on machine localtime -# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones -# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC -# # timezone = "Canada/Eastern" - - -# # Read metrics from MQTT topic(s) -# [[inputs.mqtt_consumer]] -# ## MQTT broker URLs to be used. The format should be scheme://host:port, -# ## schema can be tcp, ssl, or ws. -# servers = ["tcp://localhost:1883"] -# -# ## QoS policy for messages -# ## 0 = at most once -# ## 1 = at least once -# ## 2 = exactly once -# ## -# ## When using a QoS of 1 or 2, you should enable persistent_session to allow -# ## resuming unacknowledged messages. -# qos = 0 -# -# ## Connection timeout for initial connection in seconds -# connection_timeout = "30s" -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# -# ## Topics to subscribe to -# topics = [ -# "telegraf/host01/cpu", -# "telegraf/+/mem", -# "sensors/#", -# ] -# -# # if true, messages that can't be delivered while the subscriber is offline -# # will be delivered when it comes back (such as on service restart). -# # NOTE: if true, client_id MUST be set -# persistent_session = false -# # If empty, a random client ID will be generated. -# client_id = "" -# -# ## username and password to connect MQTT server. -# # username = "telegraf" -# # password = "metricsmetricsmetricsmetrics" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Read metrics from NATS subject(s) -# [[inputs.nats_consumer]] -# ## urls of NATS servers -# servers = ["nats://localhost:4222"] -# ## Use Transport Layer Security -# secure = false -# ## subject(s) to consume -# subjects = ["telegraf"] -# ## name a queue group -# queue_group = "telegraf_consumers" -# -# ## Sets the limits for pending msgs and bytes for each subscription -# ## These shouldn't need to be adjusted except in very high throughput scenarios -# # pending_message_limit = 65536 -# # pending_bytes_limit = 67108864 -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Read NSQ topic for metrics. -# [[inputs.nsq_consumer]] -# ## Server option still works but is deprecated, we just prepend it to the nsqd array. -# # server = "localhost:4150" -# ## An array representing the NSQD TCP HTTP Endpoints -# nsqd = ["localhost:4150"] -# ## An array representing the NSQLookupd HTTP Endpoints -# nsqlookupd = ["localhost:4161"] -# topic = "telegraf" -# channel = "consumer" -# max_in_flight = 100 -# -# ## Maximum messages to read from the broker that have not been written by an -# ## output. For best throughput set based on the number of metrics within -# ## each message and the size of the output's metric_batch_size. -# ## -# ## For example, if each message from the queue contains 10 metrics and the -# ## output metric_batch_size is 1000, setting this to 100 will ensure that a -# ## full batch is collected and the write is triggered immediately without -# ## waiting until the next flush_interval. -# # max_undelivered_messages = 1000 -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Read metrics from one or many pgbouncer servers -# [[inputs.pgbouncer]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# ## -# ## All connection parameters are optional. -# ## -# address = "host=localhost user=pgbouncer sslmode=disable" - - -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... 
dbname=app_production -# ## -# ## All connection parameters are optional. -# ## -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. -# ## -# address = "host=localhost user=postgres sslmode=disable" -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # outputaddress = "db01" -# -# ## connection configuration. -# ## maxlifetime - specify the maximum lifetime of a connection. -# ## default is forever (0s) -# max_lifetime = "0s" -# -# ## A list of databases to explicitly ignore. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'databases' option. -# # ignored_databases = ["postgres", "template0", "template1"] -# -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'ignored_databases' option. -# # databases = ["app_production", "testing"] - - -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql_extensible]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production -# # -# ## All connection parameters are optional. # -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. -# # -# address = "host=localhost user=postgres sslmode=disable" -# -# ## connection configuration. -# ## maxlifetime - specify the maximum lifetime of a connection. -# ## default is forever (0s) -# max_lifetime = "0s" -# -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. -# ## databases = ["app_production", "testing"] -# # -# ## A custom name for the database that will be used as the "server" tag in the -# ## measurement output. If not specified, a default one generated from -# ## the connection address is used. -# # outputaddress = "db01" -# # -# ## Define the toml config where the sql queries are stored -# ## New queries can be added, if the withdbname is set to true and there is no -# ## databases defined in the 'databases field', the sql query is ended by a -# ## 'is not null' in order to make the query succeed. -# ## Example : -# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become -# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" -# ## because the databases variable was set to ['postgres', 'pgbench' ] and the -# ## withdbname was true. Be careful that if the withdbname is set to false you -# ## don't have to define the where clause (aka with the dbname) the tagvalue -# ## field is used to define custom tags (separated by commas) -# ## The optional "measurement" value can be used to override the default -# ## output measurement name ("postgresql"). 
-# # -# ## Structure : -# ## [[inputs.postgresql_extensible.query]] -# ## sqlquery string -# ## version string -# ## withdbname boolean -# ## tagvalue string (comma separated) -# ## measurement string -# [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_database" -# version=901 -# withdbname=false -# tagvalue="" -# measurement="" -# [[inputs.postgresql_extensible.query]] -# sqlquery="SELECT * FROM pg_stat_bgwriter" -# version=901 -# withdbname=false -# tagvalue="postgresql.stats" - - -# # Read metrics from one or many prometheus clients -# [[inputs.prometheus]] -# ## An array of urls to scrape metrics from. -# urls = ["http://localhost:9100/metrics"] -# -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] -# -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" -# -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to 'https' & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. -# ## - prometheus.io/port: If port is not 9102 use this annotation -# # monitor_kubernetes_pods = true -# -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token -# -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# # response_timeout = "3s" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Generic socket listener capable of handling multiple socket types. -# [[inputs.socket_listener]] -# ## URL to listen on -# # service_address = "tcp://:8094" -# # service_address = "tcp://127.0.0.1:http" -# # service_address = "tcp4://:8094" -# # service_address = "tcp6://:8094" -# # service_address = "tcp6://[2001:db8::1]:8094" -# # service_address = "udp://:8094" -# # service_address = "udp4://:8094" -# # service_address = "udp6://:8094" -# # service_address = "unix:///tmp/telegraf.sock" -# # service_address = "unixgram:///tmp/telegraf.sock" -# -# ## Maximum number of concurrent connections. -# ## Only applies to stream sockets (e.g. TCP). -# ## 0 (default) is unlimited. -# # max_connections = 1024 -# -# ## Read timeout. -# ## Only applies to stream sockets (e.g. TCP). -# ## 0 (default) is unlimited. -# # read_timeout = "30s" -# -# ## Optional TLS configuration. -# ## Only applies to stream sockets (e.g. TCP). -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Enables client authentication if set. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Maximum socket buffer size (in bytes when no unit specified). -# ## For stream sockets, once the buffer fills up, the sender will start backing up. -# ## For datagram sockets, once the buffer fills up, metrics will start dropping. -# ## Defaults to the OS default. -# # read_buffer_size = "64KiB" -# -# ## Period between keep alive probes. -# ## Only applies to TCP sockets. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# # keep_alive_period = "5m" -# -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# # data_format = "influx" -#[[inputs.socket_listener]] -# service_address = "udp://:8094" -# data_format = "influx" - - -# # Statsd UDP/TCP Server -# [[inputs.statsd]] -# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) -# protocol = "udp" -# -# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) -# max_tcp_connections = 250 -# -# ## Enable TCP keep alive probes (default=false) -# tcp_keep_alive = false -# -# ## Specifies the keep-alive period for an active network connection. -# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. -# ## Defaults to the OS configuration. -# # tcp_keep_alive_period = "2h" -# -# ## Address and port to host UDP listener on -# service_address = ":8125" -# -# ## The following configuration options control when telegraf clears it's cache -# ## of previous values. If set to false, then telegraf will only clear it's -# ## cache when the daemon is restarted. -# ## Reset gauges every interval (default=true) -# delete_gauges = true -# ## Reset counters every interval (default=true) -# delete_counters = true -# ## Reset sets every interval (default=true) -# delete_sets = true -# ## Reset timings & histograms every interval (default=true) -# delete_timings = true -# -# ## Percentiles to calculate for timing & histogram stats -# percentiles = [90] -# -# ## separator to use between elements of a statsd metric -# metric_separator = "_" -# -# ## Parses tags in the datadog statsd format -# ## http://docs.datadoghq.com/guides/dogstatsd/ -# parse_data_dog_tags = false -# -# ## Statsd data translation templates, more info can be read here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md -# # templates = [ -# # "cpu.* measurement*" -# # ] -# -# ## Number of UDP messages allowed to queue up, once filled, -# ## the statsd server will start dropping packets -# allowed_pending_messages = 10000 -# -# ## Number of timing/histogram values to track per-measurement in the -# ## calculation of percentiles. Raising this limit increases the accuracy -# ## of percentiles but also increases the memory usage and cpu time. -# percentile_limit = 1000 - - -# # Accepts syslog messages per RFC5425 -# [[inputs.syslog]] -# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 -# ## Protocol, address and port to host the syslog receiver. -# ## If no host is specified, then localhost is used. -# ## If no port is specified, 6514 is used (RFC5425#section-4.1). -# server = "tcp://:6514" -# -# ## TLS Config -# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Period between keep alive probes. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# ## Only applies to stream sockets (e.g. TCP). -# # keep_alive_period = "5m" -# -# ## Maximum number of concurrent connections (default = 0). -# ## 0 means unlimited. -# ## Only applies to stream sockets (e.g. TCP). -# # max_connections = 1024 -# -# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). -# ## 0 means unlimited. -# # read_timeout = "5s" -# -# ## Whether to parse in best effort mode or not (default = false). -# ## By default best effort parsing is off. 
-# # best_effort = false
-#
-# ## Character to prepend to SD-PARAMs (default = "_").
-# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
-# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
-# ## For each combination a field is created.
-# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
-# # sdparam_separator = "_"
-
-
-# # Stream a log file, like the tail -f command
-# [[inputs.tail]]
-# ## files to tail.
-# ## These accept standard unix glob matching rules, but with the addition of
-# ## ** as a "super asterisk". ie:
-# ## "/var/log/**.log" -> recursively find all .log files in /var/log
-# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
-# ## "/var/log/apache.log" -> just tail the apache log file
-# ##
-# ## See https://github.com/gobwas/glob for more examples
-# ##
-# files = ["/var/mymetrics.out"]
-# ## Read file from beginning.
-# from_beginning = false
-# ## Whether file is a named pipe
-# pipe = false
-#
-# ## Method used to watch for file updates. Can be either "inotify" or "poll".
-# # watch_method = "inotify"
-#
-# ## Data format to consume.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
-
-
-# # Generic TCP listener
-# [[inputs.tcp_listener]]
-# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
-# # socket_listener plugin
-# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
-
-
-# # Generic UDP listener
-# [[inputs.udp_listener]]
-# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
-# # socket_listener plugin
-# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
-
-
-# # Read metrics from VMware vCenter
-# [[inputs.vsphere]]
-# ## List of vCenter URLs to be monitored. These three lines must be uncommented
-# ## and edited for the plugin to work.
-# vcenters = [ "https://vcenter.local/sdk" ] -# username = "user@corp.local" -# password = "secret" -# -# ## VMs -# ## Typical VM metrics (if omitted or empty, all metrics are collected) -# vm_metric_include = [ -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.run.summation", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.wait.summation", -# "mem.active.average", -# "mem.granted.average", -# "mem.latency.average", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.usage.average", -# "power.power.average", -# "virtualDisk.numberReadAveraged.average", -# "virtualDisk.numberWriteAveraged.average", -# "virtualDisk.read.average", -# "virtualDisk.readOIO.latest", -# "virtualDisk.throughput.usage.average", -# "virtualDisk.totalReadLatency.average", -# "virtualDisk.totalWriteLatency.average", -# "virtualDisk.write.average", -# "virtualDisk.writeOIO.latest", -# "sys.uptime.latest", -# ] -# # vm_metric_exclude = [] ## Nothing is excluded by default -# # vm_instances = true ## true by default -# -# ## Hosts -# ## Typical host metrics (if omitted or empty, all metrics are collected) -# host_metric_include = [ -# "cpu.coreUtilization.average", -# "cpu.costop.summation", -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.swapwait.summation", -# "cpu.usage.average", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.utilization.average", -# "cpu.wait.summation", -# "disk.deviceReadLatency.average", -# "disk.deviceWriteLatency.average", -# "disk.kernelReadLatency.average", -# "disk.kernelWriteLatency.average", -# "disk.numberReadAveraged.average", -# "disk.numberWriteAveraged.average", -# "disk.read.average", -# "disk.totalReadLatency.average", -# "disk.totalWriteLatency.average", -# "disk.write.average", -# "mem.active.average", -# "mem.latency.average", -# "mem.state.latest", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.totalCapacity.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.errorsRx.summation", -# "net.errorsTx.summation", -# "net.usage.average", -# "power.power.average", -# "storageAdapter.numberReadAveraged.average", -# "storageAdapter.numberWriteAveraged.average", -# "storageAdapter.read.average", -# "storageAdapter.write.average", -# "sys.uptime.latest", -# ] -# # host_metric_exclude = [] ## Nothing excluded by default -# # host_instances = true ## true by default -# -# ## Clusters -# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected -# # cluster_metric_exclude = [] ## Nothing excluded by default -# # cluster_instances = true ## true by default -# -# ## Datastores -# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected -# # datastore_metric_exclude = [] ## Nothing excluded by default -# # datastore_instances = false ## false by default for Datastores only -# -# ## Datacenters -# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected -# datacenter_metric_exclude = [ "*" ] ## Datacenters 
are not collected by default. -# # datacenter_instances = false ## false by default for Datastores only -# -# ## Plugin Settings -# ## separator character to use for measurement and field names (default: "_") -# # separator = "_" -# -# ## number of objects to retreive per query for realtime resources (vms and hosts) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_objects = 256 -# -# ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_metrics = 256 -# -# ## number of go routines to use for collection and discovery of objects and metrics -# # collect_concurrency = 1 -# # discover_concurrency = 1 -# -# ## whether or not to force discovery of new objects on initial gather call before collecting metrics -# ## when true for large environments this may cause errors for time elapsed while collecting metrics -# ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered -# # force_discover_on_init = false -# -# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) -# # object_discovery_interval = "300s" -# -# ## timeout applies to any of the api request made to vcenter -# # timeout = "20s" -# -# ## Optional SSL Config -# # ssl_ca = "/path/to/cafile" -# # ssl_cert = "/path/to/certfile" -# # ssl_key = "/path/to/keyfile" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false - - -# # A Webhooks Event collector -# [[inputs.webhooks]] -# ## Address and port to host Webhook listener on -# service_address = ":1619" -# -# [inputs.webhooks.filestack] -# path = "/filestack" -# -# [inputs.webhooks.github] -# path = "/github" -# # secret = "" -# -# [inputs.webhooks.mandrill] -# path = "/mandrill" -# -# [inputs.webhooks.rollbar] -# path = "/rollbar" -# -# [inputs.webhooks.papertrail] -# path = "/papertrail" -# -# [inputs.webhooks.particle] -# path = "/particle" - - -# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. 
-# [[inputs.zipkin]]
-# # path = "/api/v1/spans" # URL path for span data
-# # port = 9411 # Port on which Telegraf listens
diff --git a/setup/so-functions b/setup/so-functions
index 983f20b40..4beb8c0c1 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1359,6 +1359,12 @@ soc_pillar() {
   touch $soc_pillar_file
 }
 
+telegraf_pillar() {
+  title "Creating telegraf pillar"
+  touch $adv_telegraf_pillar_file
+  touch $telegraf_pillar_file
+}
+
 manager_pillar() {
   touch $adv_manager_pillar_file
   title "Create the manager pillar"
diff --git a/setup/so-variables b/setup/so-variables
index 09f6cbd37..8833154eb 100644
--- a/setup/so-variables
+++ b/setup/so-variables
@@ -84,7 +84,7 @@ mkdir -p $local_salt_dir/salt/firewall/hostgroups
 mkdir -p $local_salt_dir/salt/firewall/portgroups
 mkdir -p $local_salt_dir/salt/firewall/ports
 
-for THEDIR in bpf pcap elasticsearch ntp firewall redis backup strelka sensoroni curator soc soctopus docker zeek suricata nginx filebeat logstash soc manager kratos idstools idh elastalert
+for THEDIR in bpf pcap elasticsearch ntp firewall redis backup strelka sensoroni curator soc soctopus docker zeek suricata nginx telegraf filebeat logstash soc manager kratos idstools idh elastalert
 do
   mkdir -p $local_salt_dir/pillar/$THEDIR
   touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
@@ -203,4 +203,10 @@ idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls"
 export idh_pillar_file
 
 adv_idh_pillar_file="$local_salt_dir/pillar/idh/adv_idh.sls"
-export adv_idh_pillar_file
\ No newline at end of file
+export adv_idh_pillar_file
+
+telegraf_pillar_file="$local_salt_dir/pillar/telegraf/soc_telegraf.sls"
+export telegraf_pillar_file
+
+adv_telegraf_pillar_file="$local_salt_dir/pillar/telegraf/adv_telegraf.sls"
+export adv_telegraf_pillar_file
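
As a quick sanity check of the new pillar plumbing, the standalone bash sketch below mirrors what setup is expected to do once this patch is applied: the pillar directory is created by the for-THEDIR loop in so-variables, then telegraf_pillar touches the two .sls files. The pillar paths are taken from so-variables above; the local_salt_dir value and the stub title helper are illustrative assumptions and are not part of this patch.

#!/bin/bash
# Illustrative sketch only -- mirrors the telegraf_pillar flow from so-functions.
local_salt_dir=/tmp/so-salt-local            # assumption: stands in for the real local salt dir
telegraf_pillar_file="$local_salt_dir/pillar/telegraf/soc_telegraf.sls"
adv_telegraf_pillar_file="$local_salt_dir/pillar/telegraf/adv_telegraf.sls"

title() { echo "=== $1 ==="; }               # assumption: stub for the real title helper

telegraf_pillar() {
  title "Creating telegraf pillar"
  touch $adv_telegraf_pillar_file
  touch $telegraf_pillar_file
}

# The for-THEDIR loop in so-variables creates the pillar directory before the pillar functions run.
mkdir -p "$local_salt_dir/pillar/telegraf"
telegraf_pillar
ls "$local_salt_dir/pillar/telegraf"         # expect: adv_telegraf.sls  soc_telegraf.sls

Both files start empty; the telegraf Salt state then merges anything placed in them over the defaults.yaml values (such as the 30s interval) at render time.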