# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0

# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
# from InetAddress.getLocalHost(). If the machine has multiple network interfaces, getLocalHost()
# may not return the address you want.
#host.name=
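# For example, to pin the advertised hostname to a specific interface (the hostname below
# is hypothetical):
#host.name=kafka-broker-1.example.com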


############################# Socket Server Settings #############################

# The port the socket server listens on
port=9091

# The number of threads handling network requests
num.network.threads=2

# The number of threads doing disk I/O
num.io.threads=2

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
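# For reference: the socket buffers above are 1048576 bytes (1 MiB) and the request cap
# is 104857600 bytes (100 MiB).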


############################# Log Basics #############################

# The directory under which to store log files
log.dir=/tmp/kafka_server_logs

# The number of logical partitions per topic per server. More partitions allow greater parallelism
# for consumption, but also mean more files.
num.partitions=5

# Overrides for the default given by num.partitions on a per-topic basis
#topic.partition.count.map=topic1:3, topic2:4

############################# Log Flush Policy #############################

# The following configurations control the flush of data to disk. This is the most
# important performance knob in kafka.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
#    3. Throughput: The flush is generally the most expensive operation.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
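# For example, with the values below (log.flush.interval.messages=10000 and
# log.flush.interval.ms=1000), a flush is forced once 10000 messages have accumulated or
# once 1000 ms have passed, whichever comes first.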

# The number of messages to accept before forcing a flush of data to disk
log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
log.flush.interval.ms=1000

# Per-topic overrides for log.flush.interval.ms
#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000

# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
log.flush.scheduler.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
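# For example, with log.retention.hours=168 and log.retention.bytes=1073741824, segments are
# deleted once they are more than 7 days old or once the total log size exceeds 1 GiB,
# whichever limit is reached first.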

# The minimum age of a log file to be eligible for deletion
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes. A value of -1 disables size-based retention.
#log.retention.bytes=1073741824
log.retention.bytes=-1

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
#log.segment.bytes=536870912
log.segment.bytes=102400
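# (For reference, 102400 bytes is 100 KiB, much smaller than the commented-out
# 536870912 bytes (512 MiB), so segments roll over frequently with this setting.)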

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.cleanup.interval.mins=1

############################# Zookeeper #############################

# Enable connecting to zookeeper
enable.zookeeper=true

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zookeeper
# server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:2181
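# For example (hypothetical hosts and chroot), a connection string with a chroot might be:
#zookeeper.connect=127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/kafka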

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=1000000

monitoring.period.secs=1
# The maximum size of a message that the broker will accept
message.max.bytes=1000000
# The number of queued requests allowed before blocking the network threads
queued.max.requests=500
# The maximum time before a new log segment is rolled out, even if it has not reached log.segment.bytes
log.roll.hours=168
# The maximum size in bytes of the offset index
log.index.size.max.bytes=10485760
# The byte interval at which an entry is added to the offset index
log.index.interval.bytes=4096
# Allow topics to be created automatically when they are first used
auto.create.topics.enable=true

# Controller and replication settings
controller.socket.timeout.ms=30000
controller.message.queue.size=10
default.replication.factor=1
replica.lag.time.max.ms=10000
replica.lag.max.messages=4000
replica.socket.timeout.ms=30000
replica.socket.receive.buffer.bytes=65536
replica.fetch.max.bytes=1048576
replica.fetch.wait.max.ms=500
replica.fetch.min.bytes=4096
num.replica.fetchers=1