Fix typos and fix an error where the elasticsearch salt state was being called from the logstash state. Logstash will be removed from Kafka nodes in the future.

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
reyesj2
2024-04-02 16:37:39 -04:00
parent 7f5741c43b
commit 82830c8173
2 changed files with 123 additions and 123 deletions


@@ -1,123 +1,123 @@
# This configuration file is intended for use in KRaft mode, where
# Apache ZooKeeper is not present. See config/kraft/README.md for details.
#

############################# Server Basics #############################

# The role of this server. Setting this puts us in KRaft mode
process.roles=broker,controller

# The node id associated with this instance's roles
node.id={{ kafka_nodeid }}

# The connect string for the controller quorum
controller.quorum.voters={{ kraft_controller_quorum_voters }}

############################# Socket Server Settings #############################

# The address the socket server listens on.
# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum.
# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(),
# with PLAINTEXT listener name, and port 9092.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093

# Name of listener used for communication between brokers.
inter.broker.listener.name=BROKER

# Listener name, hostname and port the broker will advertise to clients.
# If not set, it uses the value for "listeners".
advertised.listeners=BROKER://{{ kafka_ip }}:9092

# A comma-separated list of the names of the listeners used by the controller.
# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol
# This is required if running in KRaft mode.
controller.listener.names=CONTROLLER

# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL

#SSL configuration
ssl.keystore.location=/etc/pki/kafka.jks
-ssl.keystore.pasword=changeit
+ssl.keystore.password=changeit
ssl.keystore.type=JKS
ssl.truststore.location=/etc/pki/java/sos/cacerts
ssl.truststore.password=changeit

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600

############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/nsm/kafka/data

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings #############################

# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
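The substantive change in this hunk is the keystore password key: the old ssl.keystore.pasword is not a config name Kafka recognizes, so the intended store password was never applied. A quick way to confirm the corrected value actually opens the keystore referenced above (a sketch, assuming the template renders the literal path and password shown here rather than pillar-supplied values):

keytool -list -keystore /etc/pki/kafka.jks -storetype JKS -storepass changeit

With the right password, keytool lists the entries in /etc/pki/kafka.jks; with a wrong one it fails the keystore integrity check.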


@@ -12,7 +12,7 @@
include:
  - ssl
-{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
+{% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %}
  - elasticsearch
{% endif %}
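For illustration, the guard now renders along these lines (a sketch, assuming GLOBALS.role carries values such as 'so-kafkanode'), which is what stops this state from dragging the elasticsearch state onto Kafka nodes:

# Rendered include list on so-kafkanode (likewise so-receiver/so-fleet):
include:
  - ssl

# Rendered include list on any role not in the exclusion list:
include:
  - ssl
  - elasticsearch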