mirror of https://github.com/apache/kafka.git

KAFKA-537 Expose clientId in ConsumerConfig and fix correlation id; patched by Yang Ye; reviewed by Neha Narkhede

git-svn-id: https://svn.apache.org/repos/asf/incubator/kafka/branches/0.8@1398893 13f79535-47bb-0310-9956-ffa450edef68

parent d1a22b2e3b
commit 98f6407089
@@ -60,7 +60,6 @@ public class KafkaETLContext {
     protected long _offset = Long.MAX_VALUE; /*current offset*/
     protected long _count; /*current count*/

-    protected int requestId = 0; /* the id of the next fetch request */
     protected FetchResponse _response = null; /*fetch response*/
     protected Iterator<MessageAndOffset> _messageIt = null; /*message iterator*/
     protected Iterator<ByteBufferMessageSet> _respIterator = null;
@@ -74,6 +73,7 @@ public class KafkaETLContext {

     protected MultipleOutputs _mos;
     protected OutputCollector<KafkaETLKey, BytesWritable> _offsetOut = null;
+    protected FetchRequestBuilder builder = new FetchRequestBuilder();

     public long getTotalBytes() {
         return (_offsetRange[1] > _offsetRange[0])? _offsetRange[1] - _offsetRange[0] : 0;
@@ -150,8 +150,7 @@ public class KafkaETLContext {
     public boolean fetchMore () throws IOException {
         if (!hasMore()) return false;

-        FetchRequest fetchRequest = new FetchRequestBuilder()
-                .correlationId(requestId)
+        FetchRequest fetchRequest = builder
                 .clientId(_request.clientId())
                 .addFetch(_request.getTopic(), _request.getPartition(), _offset, _bufferSize)
                 .build();
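With its requestId counter removed, the ETL context stops numbering fetches by hand: it now holds one reusable FetchRequestBuilder, and the correlation id is assigned automatically on each build() call (see the FetchRequestBuilder change below).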
@@ -23,6 +23,7 @@ import kafka.api.ApiUtils._
 import scala.collection.immutable.Map
 import kafka.common.TopicAndPartition
 import kafka.consumer.ConsumerConfig
+import java.util.concurrent.atomic.AtomicInteger


 case class PartitionFetchInfo(offset: Long, fetchSize: Int)
@@ -30,8 +31,10 @@ case class PartitionFetchInfo(offset: Long, fetchSize: Int)

 object FetchRequest {
   val CurrentVersion = 1.shortValue()
-  val DefaultCorrelationId = -1
-  val DefaultClientId = ""
+  val DefaultMaxWait = 0
+  val DefaultMinBytes = 0
+  val ReplicaFetcherClientId = "replica fetcher"
+  val DefaultCorrelationId = 0

   def readFrom(buffer: ByteBuffer): FetchRequest = {
     val versionId = buffer.getShort
@@ -57,10 +60,10 @@ object FetchRequest {

 case class FetchRequest(versionId: Short = FetchRequest.CurrentVersion,
                         correlationId: Int = FetchRequest.DefaultCorrelationId,
-                        clientId: String = FetchRequest.DefaultClientId,
+                        clientId: String = ConsumerConfig.DefaultClientId,
                         replicaId: Int = Request.OrdinaryConsumerId,
-                        maxWait: Int = ConsumerConfig.MaxFetchWaitMs,
-                        minBytes: Int = ConsumerConfig.MinFetchBytes,
+                        maxWait: Int = FetchRequest.DefaultMaxWait,
+                        minBytes: Int = FetchRequest.DefaultMinBytes,
                         requestInfo: Map[TopicAndPartition, PartitionFetchInfo])
         extends RequestOrResponse(Some(RequestKeys.FetchKey)) {

@@ -123,12 +126,12 @@ case class FetchRequest(versionId: Short = FetchRequest.CurrentVersion,

 @nonthreadsafe
 class FetchRequestBuilder() {
-  private var correlationId = FetchRequest.DefaultCorrelationId
+  private val correlationId = new AtomicInteger(0)
   private val versionId = FetchRequest.CurrentVersion
-  private var clientId = FetchRequest.DefaultClientId
+  private var clientId = ConsumerConfig.DefaultClientId
   private var replicaId = Request.OrdinaryConsumerId
-  private var maxWait = ConsumerConfig.MaxFetchWaitMs
-  private var minBytes = ConsumerConfig.MinFetchBytes
+  private var maxWait = FetchRequest.DefaultMaxWait
+  private var minBytes = FetchRequest.DefaultMinBytes
   private val requestMap = new collection.mutable.HashMap[TopicAndPartition, PartitionFetchInfo]

   def addFetch(topic: String, partition: Int, offset: Long, fetchSize: Int) = {
@@ -136,11 +139,6 @@ class FetchRequestBuilder() {
     this
   }

-  def correlationId(correlationId: Int): FetchRequestBuilder = {
-    this.correlationId = correlationId
-    this
-  }
-
   def clientId(clientId: String): FetchRequestBuilder = {
     this.clientId = clientId
     this
@@ -161,5 +159,5 @@ class FetchRequestBuilder() {
     this
   }

-  def build() = FetchRequest(versionId, correlationId, clientId, replicaId, maxWait, minBytes, requestMap.toMap)
+  def build() = FetchRequest(versionId, correlationId.getAndIncrement, clientId, replicaId, maxWait, minBytes, requestMap.toMap)
 }
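The net effect of the builder changes above: correlation ids are no longer caller-supplied; build() draws the next id from the builder's AtomicInteger. A minimal sketch of the new behavior (client id, topic, and fetch values are arbitrary illustration values):

    import kafka.api.FetchRequestBuilder

    // One long-lived builder per client; ids are assigned on build().
    val builder = new FetchRequestBuilder()
      .clientId("demo-client")
      .addFetch("demo-topic", 0, 0L, 64 * 1024)

    val first = builder.build()    // correlationId == 0
    val second = builder.build()   // correlationId == 1 (getAndIncrement)
    assert(first.correlationId == 0 && second.correlationId == 1)

This is why the call sites below can all drop their .correlationId(...) calls and hand-rolled reqId counters.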
@@ -42,6 +42,7 @@ object ConsumerConfig {
   val MirrorTopicsWhitelistProp = "mirror.topics.whitelist"
   val MirrorTopicsBlacklistProp = "mirror.topics.blacklist"
   val MirrorConsumerNumThreadsProp = "mirror.consumer.numthreads"
+  val DefaultClientId = ""
 }

 class ConsumerConfig private (props: VerifiableProperties) extends ZKConfig(props) {
@@ -106,5 +107,10 @@ class ConsumerConfig private (props: VerifiableProperties) extends ZKConfig(prop
    * overhead of decompression.
    * */
   val enableShallowIterator = props.getBoolean("shallowiterator.enable", false)
+
+  /**
+   * Client id is specified by the kafka consumer client, used to distinguish different clients
+   */
+  val clientId = props.getString("clientid", groupId)
 }

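With DefaultClientId in the companion object and the new clientid property, a consumer's client id can be set explicitly or left to default to its group id. A sketch of the lookup, assuming this branch's Properties-based constructor and dot-free property names (values are placeholders):

    import java.util.Properties
    import kafka.consumer.ConsumerConfig

    val props = new Properties()
    props.put("zk.connect", "localhost:2181")   // placeholder ZooKeeper quorum
    props.put("groupid", "log-aggregator")

    // No "clientid" set: clientId falls back to the group id.
    assert(new ConsumerConfig(props).clientId == "log-aggregator")

    props.put("clientid", "aggregator-7")       // an explicit client id wins
    assert(new ConsumerConfig(props).clientId == "aggregator-7")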
@@ -28,10 +28,15 @@ class ConsumerFetcherThread(name: String,
                             val config: ConsumerConfig,
                             sourceBroker: Broker,
                             val consumerFetcherManager: ConsumerFetcherManager)
-        extends AbstractFetcherThread(name = name, sourceBroker = sourceBroker, socketTimeout = config.socketTimeoutMs,
-          socketBufferSize = config.socketBufferSize, fetchSize = config.fetchSize,
-          fetcherBrokerId = Request.OrdinaryConsumerId, maxWait = config.maxFetchWaitMs,
-          minBytes = config.minFetchBytes) {
+        extends AbstractFetcherThread(name = name,
+                                      clientId = config.clientId,
+                                      sourceBroker = sourceBroker,
+                                      socketTimeout = config.socketTimeoutMs,
+                                      socketBufferSize = config.socketBufferSize,
+                                      fetchSize = config.fetchSize,
+                                      fetcherBrokerId = Request.OrdinaryConsumerId,
+                                      maxWait = config.maxFetchWaitMs,
+                                      minBytes = config.minFetchBytes) {

   // process fetched data
   def processPartitionData(topicAndPartition: TopicAndPartition, fetchOffset: Long, partitionData: FetchResponsePartitionData) {
@@ -34,7 +34,7 @@ import java.util.concurrent.TimeUnit
 /**
  *  Abstract class for fetching data from multiple partitions from the same broker.
  */
-abstract class AbstractFetcherThread(name: String, sourceBroker: Broker, socketTimeout: Int, socketBufferSize: Int,
+abstract class AbstractFetcherThread(name: String, clientId: String, sourceBroker: Broker, socketTimeout: Int, socketBufferSize: Int,
                                      fetchSize: Int, fetcherBrokerId: Int = -1, maxWait: Int = 0, minBytes: Int = 1)
   extends ShutdownableThread(name) {

@@ -42,7 +42,13 @@ abstract class AbstractFetcherThread(name: String, sourceBroker: Broker, socket
   private val fetchMapLock = new Object
   val simpleConsumer = new SimpleConsumer(sourceBroker.host, sourceBroker.port, socketTimeout, socketBufferSize)
   val fetcherMetrics = FetcherStat.getFetcherStat(name + "-" + sourceBroker.id)

+  val fetchRequestBuilder = new FetchRequestBuilder().
+          clientId(clientId).
+          replicaId(fetcherBrokerId).
+          maxWait(maxWait).
+          minBytes(minBytes)
+
   /* callbacks to be defined in subclass */

   // process fetched data
@@ -61,21 +67,15 @@ abstract class AbstractFetcherThread(name: String, sourceBroker: Broker, socket
   }

   override def doWork() {
-    val builder = new FetchRequestBuilder().
-            clientId(name).
-            replicaId(fetcherBrokerId).
-            maxWait(maxWait).
-            minBytes(minBytes)
-
     fetchMapLock synchronized {
       fetchMap.foreach {
         case((topicAndPartition, offset)) =>
-          builder.addFetch(topicAndPartition.topic, topicAndPartition.partition,
+          fetchRequestBuilder.addFetch(topicAndPartition.topic, topicAndPartition.partition,
                            offset, fetchSize)
       }
     }

-    val fetchRequest = builder.build()
+    val fetchRequest = fetchRequestBuilder.build()
     val partitionsWithError = new mutable.HashSet[TopicAndPartition]
     var response: FetchResponse = null
     try {
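Design note on hoisting the builder: FetchRequestBuilder is @nonthreadsafe, but each fetcher thread owns its own instance, so the clientId, replicaId, maxWait and minBytes can be configured once at construction instead of on every doWork() pass, and the builder's AtomicInteger hands each successive fetch request the next correlation id. The client id also changes from the thread's name to the value supplied by the subclass (the consumer's configured clientId, or the replica fetcher's id below).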
@@ -17,17 +17,25 @@

 package kafka.server

-import kafka.api.{PartitionOffsetRequestInfo, OffsetRequest, FetchResponsePartitionData}
 import kafka.cluster.Broker
 import kafka.message.ByteBufferMessageSet
 import kafka.common.TopicAndPartition
+import kafka.api.{FetchRequest, PartitionOffsetRequestInfo, OffsetRequest, FetchResponsePartitionData}


-class ReplicaFetcherThread(name:String, sourceBroker: Broker, brokerConfig: KafkaConfig, replicaMgr: ReplicaManager)
-  extends AbstractFetcherThread(name = name, sourceBroker = sourceBroker, socketTimeout = brokerConfig.replicaSocketTimeoutMs,
-    socketBufferSize = brokerConfig.replicaSocketBufferSize, fetchSize = brokerConfig.replicaFetchSize,
-    fetcherBrokerId = brokerConfig.brokerId, maxWait = brokerConfig.replicaMaxWaitTimeMs,
-    minBytes = brokerConfig.replicaMinBytes) {
+class ReplicaFetcherThread(name:String,
+                           sourceBroker: Broker,
+                           brokerConfig: KafkaConfig,
+                           replicaMgr: ReplicaManager)
+  extends AbstractFetcherThread(name = name,
+                                clientId = FetchRequest.ReplicaFetcherClientId + "- %s:%d".format(sourceBroker.host, sourceBroker.port),
+                                sourceBroker = sourceBroker,
+                                socketTimeout = brokerConfig.replicaSocketTimeoutMs,
+                                socketBufferSize = brokerConfig.replicaSocketBufferSize,
+                                fetchSize = brokerConfig.replicaFetchSize,
+                                fetcherBrokerId = brokerConfig.brokerId,
+                                maxWait = brokerConfig.replicaMaxWaitTimeMs,
+                                minBytes = brokerConfig.replicaMinBytes) {

   // process fetched data
   def processPartitionData(topicAndPartition: TopicAndPartition, fetchOffset: Long, partitionData: FetchResponsePartitionData) {
@@ -114,6 +114,12 @@ object SimpleConsumerShell extends Logging {
     val messageFormatterClass = Class.forName(options.valueOf(messageFormatterOpt))
     val formatterArgs = MessageFormatter.tryParseFormatterArgs(options.valuesOf(messageFormatterArgOpt))

+    val fetchRequestBuilder = new FetchRequestBuilder()
+            .clientId(clientId)
+            .replicaId(Request.DebuggingConsumerId)
+            .maxWait(maxWaitMs)
+            .minBytes(ConsumerConfig.MinFetchBytes)
+
     // getting topic metadata
     info("Getting topic metadata...")
     val metadataTargetBrokers = ClientUtils.parseBrokerList(options.valueOf(brokerListOpt))
@@ -168,17 +174,11 @@ object SimpleConsumerShell extends Logging {
     val simpleConsumer = new SimpleConsumer(fetchTargetBroker.host, fetchTargetBroker.port, 10000, 64*1024)
     val thread = Utils.newThread("kafka-simpleconsumer-shell", new Runnable() {
       def run() {
-        var reqId = 0
         var offset = startingOffset
         try {
           while(true) {
-            val fetchRequest = new FetchRequestBuilder()
-                    .correlationId(reqId)
-                    .clientId(clientId)
-                    .replicaId(Request.DebuggingConsumerId)
+            val fetchRequest = fetchRequestBuilder
                     .addFetch(topic, partitionId, offset, fetchSize)
-                    .maxWait(maxWaitMs)
-                    .minBytes(ConsumerConfig.MinFetchBytes)
                     .build()
             val fetchResponse = simpleConsumer.fetch(fetchRequest)
             val messageSet = fetchResponse.messageSet(topic, partitionId)
@@ -206,11 +206,10 @@ object SimpleConsumerShell extends Logging {
             }
             consumed += 1
           }
-          reqId += 1
         }
       } catch {
         case e: Throwable =>
-          error("Error consuming topic, partition, replica (%s, %d, %d) with request id [%d] and offset [%d]".format(topic, partitionId, replicaId, reqId, offset), e)
+          error("Error consuming topic, partition, replica (%s, %d, %d) with offset [%d]".format(topic, partitionId, replicaId, offset), e)
       }
     }
   }, false)
@@ -62,7 +62,6 @@ class PrimitiveApiTest extends JUnit3Suite with ProducerConsumerTestHarness with

   def testFetchRequestCanProperlySerialize() {
     val request = new FetchRequestBuilder()
-      .correlationId(100)
       .clientId("test-client")
       .maxWait(10001)
       .minBytes(4444)
@@ -99,12 +98,11 @@ class PrimitiveApiTest extends JUnit3Suite with ProducerConsumerTestHarness with
       replica.logEndOffset > 0 && replica.logEndOffset == replica.highWatermark)

     val request = new FetchRequestBuilder()
-      .correlationId(100)
       .clientId("test-client")
       .addFetch(topic, 0, 0, 10000)
       .build()
     val fetched = consumer.fetch(request)
-    assertEquals("Returned correlationId doesn't match that in request.", 100, fetched.correlationId)
+    assertEquals("Returned correlationId doesn't match that in request.", 0, fetched.correlationId)

     val messageSet = fetched.messageSet(topic, 0)
     assertTrue(messageSet.iterator.hasNext)
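The expected correlation id here drops from 100 to 0 because callers can no longer choose one: a fresh FetchRequestBuilder starts its counter at zero, so the first request it builds carries correlation id 0, and that is the id the broker echoes back in the FetchResponse.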
@@ -57,7 +57,6 @@ public class SimpleConsumerDemo {

     System.out.println("Testing single fetch");
     FetchRequest req = new FetchRequestBuilder()
-        .correlationId(0)
         .clientId(KafkaProperties.clientId)
         .addFetch(KafkaProperties.topic2, 0, 0L, 100)
         .build();
@@ -70,7 +69,6 @@ public class SimpleConsumerDemo {
       put(KafkaProperties.topic3, new ArrayList<Integer>(){{ add(0); }});
     }};
     req = new FetchRequestBuilder()
-        .correlationId(0)
         .clientId(KafkaProperties.clientId)
         .addFetch(KafkaProperties.topic2, 0, 0L, 100)
         .addFetch(KafkaProperties.topic3, 0, 0L, 100)
@@ -59,11 +59,9 @@ object SimpleConsumerPerformance {
     var lastReportTime: Long = startMs
     var lastBytesRead = 0L
     var lastMessagesRead = 0L
-    var reqId = 0
     while(!done) {
       // TODO: add in the maxWait and minBytes for performance
       val request = new FetchRequestBuilder()
-        .correlationId(reqId)
         .clientId(config.clientId)
         .addFetch(config.topic, config.partition, offset, config.fetchSize)
         .build()
@@ -101,7 +99,6 @@ object SimpleConsumerPerformance {
         lastMessagesRead = totalMessagesRead
         consumedInterval = 0
       }
-      reqId += 1
     }
     val reportTime = System.currentTimeMillis
     val elapsed = (reportTime - startMs) / 1000.0