mirror of https://github.com/apache/kafka.git
MINOR: Remove unused parameters in functions. (#10035)
Reviewers: Chia-Ping Tsai <chia7712@gmail.com>
Parent: 982ea2f6a4
Commit: 7583e14fb2
@@ -720,7 +720,7 @@ class KafkaApis(val requestChannel: RequestChannel,
     }
   }
 
-  def maybeDownConvertStorageError(error: Errors, version: Short): Errors = {
+  def maybeDownConvertStorageError(error: Errors): Errors = {
     // If consumer sends FetchRequest V5 or earlier, the client library is not guaranteed to recognize the error code
     // for KafkaStorageException. In this case the client library will translate KafkaStorageException to
     // UnknownServerException which is not retriable. We can ensure that consumer will update metadata and retry
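For context, the body of this helper is not shown in the hunk; it rewrites storage errors into a retriable leadership error so that clients refresh metadata and retry instead of failing with UnknownServerException. A minimal sketch, assuming the mapping targets Errors.NOT_LEADER_OR_FOLLOWER as in recent Kafka versions (illustrative only, not the committed code):

// Illustrative sketch: assumes KAFKA_STORAGE_ERROR is rewritten to the
// retriable NOT_LEADER_OR_FOLLOWER error; other errors pass through unchanged.
import org.apache.kafka.common.protocol.Errors

def maybeDownConvertStorageError(error: Errors): Errors = {
  if (error == Errors.KAFKA_STORAGE_ERROR)
    Errors.NOT_LEADER_OR_FOLLOWER  // retriable: the client updates metadata and retries
  else
    error
}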
@@ -771,7 +771,7 @@ class KafkaApis(val requestChannel: RequestChannel,
           // as possible. With KIP-283, we have the ability to lazily down-convert in a chunked manner. The lazy, chunked
           // down-conversion always guarantees that at least one batch of messages is down-converted and sent out to the
           // client.
-          val error = maybeDownConvertStorageError(partitionData.error, versionId)
+          val error = maybeDownConvertStorageError(partitionData.error)
           new FetchResponse.PartitionData[BaseRecords](error, partitionData.highWatermark,
             partitionData.lastStableOffset, partitionData.logStartOffset,
             partitionData.preferredReadReplica, partitionData.abortedTransactions,
@@ -783,7 +783,7 @@ class KafkaApis(val requestChannel: RequestChannel,
            }
          }
        case None =>
-         val error = maybeDownConvertStorageError(partitionData.error, versionId)
+         val error = maybeDownConvertStorageError(partitionData.error)
          new FetchResponse.PartitionData[BaseRecords](error,
            partitionData.highWatermark,
            partitionData.lastStableOffset,
@@ -805,7 +805,7 @@ class KafkaApis(val requestChannel: RequestChannel,
         val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
         if (data.isReassignmentFetch)
           reassigningPartitions.add(tp)
-        val error = maybeDownConvertStorageError(data.error, versionId)
+        val error = maybeDownConvertStorageError(data.error)
         partitions.put(tp, new FetchResponse.PartitionData(
           error,
           data.highWatermark,
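All three call sites now drop the unused versionId argument, so the error mapping no longer appears to depend on the fetch request version. A hedged, self-contained usage sketch of the call-site pattern (the surrounding object and main method are hypothetical, added only for illustration):

// Standalone illustration of the call-site change; KafkaApisSketch is a
// hypothetical wrapper, not part of the Kafka codebase.
import org.apache.kafka.common.protocol.Errors

object KafkaApisSketch {
  // Assumed behavior of the helper, as sketched above.
  def maybeDownConvertStorageError(error: Errors): Errors =
    if (error == Errors.KAFKA_STORAGE_ERROR) Errors.NOT_LEADER_OR_FOLLOWER else error

  def main(args: Array[String]): Unit = {
    // Before: maybeDownConvertStorageError(partitionData.error, versionId)
    // After:  maybeDownConvertStorageError(partitionData.error)
    println(maybeDownConvertStorageError(Errors.KAFKA_STORAGE_ERROR)) // NOT_LEADER_OR_FOLLOWER
    println(maybeDownConvertStorageError(Errors.NONE))                // NONE
  }
}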