max.message.size and fetch.size defaults should be consistent; patched by Pierre-Yves Ritschard; reviewed by Jun Rao; KAFKA-247

git-svn-id: https://svn.apache.org/repos/asf/incubator/kafka/trunk@1232500 13f79535-47bb-0310-9956-ffa450edef68
Committed by Jun Rao on 2012-01-17 17:30:38 +00:00
parent 1021ea9002
commit d3c58b6428
1 changed file with 1 addition and 4 deletions
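The rationale behind the change: the consumer's fetch.size bounds the largest message it can pull in one request, so its default should be at least the broker's default max.message.size. A minimal sketch of that arithmetic, assuming the 0.7-era broker default of 1000000 bytes (the object and constant names below are illustrative, not Kafka's API):

// Sketch only: names are illustrative, not part of this commit or of Kafka's API.
// Assumes a broker-side default max.message.size of 1000000 bytes.
object FetchSizeConsistencyCheck {
  val MaxMessageSize = 1000000      // assumed broker default (bytes)
  val OldFetchSize   = 300 * 1024   // 307200 bytes, smaller than MaxMessageSize
  val NewFetchSize   = 1024 * 1024  // 1048576 bytes, large enough for a default-sized message

  def consistent(fetchSize: Int): Boolean = fetchSize >= MaxMessageSize

  def main(args: Array[String]): Unit = {
    println("old default consistent? " + consistent(OldFetchSize))  // false
    println("new default consistent? " + consistent(NewFetchSize))  // true
  }
}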


@@ -24,7 +24,7 @@ import kafka.common.InvalidConfigException
 object ConsumerConfig {
   val SocketTimeout = 30 * 1000
   val SocketBufferSize = 64*1024
-  val FetchSize = 300 * 1024
+  val FetchSize = 1024 * 1024
   val MaxFetchSize = 10*FetchSize
   val DefaultFetcherBackoffMs = 1000
   val AutoCommit = true
@@ -62,9 +62,6 @@ class ConsumerConfig(props: Properties) extends ZKConfig(props) {
   /** the number of byes of messages to attempt to fetch */
   val fetchSize = Utils.getInt(props, "fetch.size", FetchSize)
 
-  /** the maximum allowable fetch size for a very large message */
-  val maxFetchSize: Int = fetchSize * 10
-
   /** to avoid repeatedly polling a broker node which has no new data
       we will backoff every time we get an empty set from the broker*/
   val fetcherBackoffMs: Long = Utils.getInt(props, "fetcher.backoff.ms", DefaultFetcherBackoffMs)
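For context, a hedged usage sketch of the config class touched above, assuming the 0.7-era consumer API; the property names zk.connect and groupid and the broker/ZooKeeper address are assumptions for illustration. With the new default, fetch.size only needs an explicit override when messages can exceed 1 MB.

import java.util.Properties
import kafka.consumer.ConsumerConfig

// Usage sketch, not part of this commit. Property names and values are assumptions
// based on the 0.7-era consumer; adjust them to your deployment.
object ConsumerConfigExample {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("zk.connect", "localhost:2181")   // hypothetical ZooKeeper connect string
    props.put("groupid", "example-group")       // consumer group id
    // Optional: override only when messages can exceed the new 1 MB default.
    props.put("fetch.size", (2 * 1024 * 1024).toString)

    val config = new ConsumerConfig(props)
    println("fetch.size = " + config.fetchSize)  // 2097152 when overridden, else 1048576
  }
}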