# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# See org.apache.kafka.clients.producer.ProducerConfig for more details.
# Consider using environment variables or external configuration management
# for sensitive information like passwords and environment-specific settings.

##################### Producer Basics #####################

# List of Kafka brokers used for initial cluster discovery and metadata retrieval.
# Format: host1:port1,host2:port2,host3:port3
# Include all brokers for high availability.
bootstrap.servers=localhost:9092
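
# In a multi-broker deployment, list several brokers so metadata can still be fetched
# if one is down. The hostnames below are placeholders for illustration, not values
# from the original file:
#bootstrap.servers=broker1.example.com:9092,broker2.example.com:9092,broker3.example.com:9092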

# Client identifier for logging and metrics.
# Helps with debugging and monitoring.
client.id=test-producer

##################### Transaction Support #####################

# Transactional ID for the producer.
# Must be unique across all producer instances.
# Enables exactly-once semantics across multiple partitions/topics.
#transactional.id=test-transactional-id

# Maximum amount of time in milliseconds that a transaction will remain open.
# Only applies when transactional.id is set.
transaction.timeout.ms=60000
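
# A minimal sketch (in Java, shown here as comments) of how a producer created from
# this file might use the transactional API once transactional.id is set; the topic,
# key, and value below are placeholders:
#   producer.initTransactions();
#   try {
#       producer.beginTransaction();
#       producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
#       producer.commitTransaction();
#   } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
#       producer.close();             // fatal errors: the producer must be closed
#   } catch (KafkaException e) {
#       producer.abortTransaction();  // recoverable errors: abort and retry if desired
#   }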

##################### Partitioning #####################

# Name of the partitioner class for partitioning records.
# Default uses "sticky" partitioning, which improves throughput by filling a batch
# for one partition before switching to another.
# Options: DefaultPartitioner, RoundRobinPartitioner, UniformStickyPartitioner.
#partitioner.class=org.apache.kafka.clients.producer.RoundRobinPartitioner
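
# Regardless of the configured partitioner, a record can also target an explicit
# partition through the ProducerRecord constructor. A sketch in Java; the topic name
# and partition number are placeholders:
#   producer.send(new ProducerRecord<>("demo-topic", 0, "key", "value"));  // explicit partition 0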

##################### Serialization #####################

# Serializer class for message keys.
# Common options: StringSerializer, ByteArraySerializer, AvroSerializer.
key.serializer=org.apache.kafka.common.serialization.StringSerializer

# Serializer class for message values.
value.serializer=org.apache.kafka.common.serialization.StringSerializer
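
# These settings correspond to a KafkaProducer<String, String>. A sketch of the
# equivalent programmatic configuration in Java; the Properties object "props" and
# the surrounding setup are assumed:
#   props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
#   props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
#   KafkaProducer<String, String> producer = new KafkaProducer<>(props);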

##################### Reliability And Durability #####################

# Number of acknowledgments the producer requires the leader to have received.
# Options: 0 (no ack), 1 (leader only), all/-1 (all in-sync replicas).
# Use 'all' for maximum durability.
acks=all

# Number of retries for failed sends.
# Set to a high value or Integer.MAX_VALUE for maximum reliability.
retries=2147483647

# Initial and maximum time to wait before retrying a failed request.
# retry.backoff.ms is the initial backoff value and increases exponentially
# for each failed request, up to retry.backoff.max.ms.
retry.backoff.ms=100
retry.backoff.max.ms=1000

# Enable idempotent producer to prevent duplicate messages.
# Ensures exactly-once delivery semantics when combined with proper consumer settings.
enable.idempotence=true

# Maximum number of unacknowledged requests the client will send on a single connection.
# Must be <= 5 when enable.idempotence=true to maintain ordering guarantees.
max.in.flight.requests.per.connection=5
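
# Even with acks=all, retries, and idempotence, an individual send can still fail
# (for example when delivery.timeout.ms expires). A sketch, in Java, of per-record
# error handling with the asynchronous callback; "record" and "log" are assumed
# placeholders:
#   producer.send(record, (metadata, exception) -> {
#       if (exception != null) {
#           log.error("send failed", exception);
#       }
#   });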

##################### Timeouts And Blocking #####################

# Maximum amount of time the client will wait for the response of a request.
# Should be higher than replica.lag.time.max.ms (broker config).
request.timeout.ms=30000

# How long KafkaProducer.send() and KafkaProducer.partitionsFor() will block.
# Should be higher than request.timeout.ms.
max.block.ms=60000

# Upper bound on the time to report success or failure after a call to send() returns,
# including time spent batching, awaiting acknowledgment, and retrying.
# Should be greater than or equal to the sum of request.timeout.ms and linger.ms.
delivery.timeout.ms=120000
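
# For a synchronous send, blocking on the returned Future reports success or failure
# within the bound set by delivery.timeout.ms above. A sketch in Java; "record" is an
# assumed placeholder:
#   RecordMetadata metadata = producer.send(record).get();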

##################### Security Configuration #####################

# Security protocol for communication with brokers.
# Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
#security.protocol=SASL_SSL

# SSL configuration.
#ssl.truststore.location=/path/to/truststore.jks
#ssl.truststore.password=truststore-password

# SASL configuration.
#sasl.mechanism=PLAIN
#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
#  username="your-username" \
#  password="your-password";
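
# If the cluster also requires client (mutual TLS) authentication, a keystore is
# configured as well; the paths and passwords below are placeholders, not values
# from the original file:
#ssl.keystore.location=/path/to/keystore.jks
#ssl.keystore.password=keystore-password
#ssl.key.password=key-password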

##################### Performance And Throughput #####################

# Compression codec for all data generated.
# Options: none, gzip, snappy, lz4, zstd.
# Can greatly improve throughput at the cost of increased CPU usage.
compression.type=none

# Producer will wait up to this delay to batch records together.
# Higher values increase throughput but add latency.
# Set to 0 for lowest latency, 5-100ms for balanced throughput/latency.
linger.ms=5

# Default batch size in bytes when batching multiple records sent to a partition.
# Larger batches improve throughput but use more memory.
# 16KB is a good starting point; adjust based on message size and throughput needs.
batch.size=16384

# Total bytes of memory the producer can use to buffer records waiting to be sent.
# Should be larger than batch.size * number of partitions you're writing to.
# 32MB is reasonable for most use cases.
buffer.memory=33554432

# Maximum size of a request in bytes.
# Should accommodate your largest batch size plus overhead.
# 1MB is the default and suitable for most cases.
max.request.size=1048576
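
# A sketch of how a Java client might load this file and create a producer from it;
# the file path and topic name are assumptions, not part of the original file:
#   Properties props = new Properties();
#   try (InputStream in = Files.newInputStream(Paths.get("producer.properties"))) {
#       props.load(in);
#   }
#   try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
#       producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
#       producer.flush();  // block until buffered records have been sent
#   }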