mirror of https://github.com/apache/kafka.git
This PR removes a small piece of unused code that is also incorrect: the `brokersToValidate` parameter of `increasePartitions`, which the lone call site filled with `Seq.empty`, so the validation block it guarded never ran.

Reviewers: Apoorv Mittal <apoorvmittal10@gmail.com>
parent 6d67d82d5b
commit 019459e950
@@ -18,9 +18,8 @@ package kafka.server
 import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type}
 import kafka.utils.TestUtils
-import kafka.utils.TestUtils.waitForAllPartitionsMetadata
 import org.apache.kafka.clients.admin.{Admin, NewPartitions}
-import org.apache.kafka.common.{TopicPartition, Uuid}
+import org.apache.kafka.common.Uuid
 import org.apache.kafka.common.message.{ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData}
 import org.apache.kafka.common.protocol.Errors
 import org.apache.kafka.common.requests.{ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse}
@@ -524,7 +523,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) {
     // Verify the response.
     assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch)
     // Increasing the partitions of topic bar which is already being consumed in the share group.
-    increasePartitions(admin, "bar", 6, Seq.empty)
+    increasePartitions(admin, "bar", 6)
 
     expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment()
       .setTopicPartitions(List(
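The call site simply drops the now-dead `Seq.empty` argument. For context, the helper is a thin wrapper over the Admin client's partition-growth API; below is a minimal self-contained sketch of the same pattern (the bootstrap address and object name are assumptions for illustration, not part of this commit):

import java.util.Properties
import scala.jdk.CollectionConverters._
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, NewPartitions}

// Standalone sketch of what increasePartitions does: grow topic "bar" to a
// total of 6 partitions. NewPartitions.increaseTo takes the new total count,
// not a delta; createPartitions returns a result whose future we block on here.
object IncreasePartitionsSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // assumed address
    val admin = Admin.create(props)
    try {
      admin.createPartitions(Map("bar" -> NewPartitions.increaseTo(6)).asJava).all().get()
    } finally admin.close()
  }
}

Note that the test helper itself does not block on the returned future, which is why the (now removed) metadata-propagation check once existed alongside it.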
@@ -876,20 +875,9 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) {
 
   private def increasePartitions[B <: KafkaBroker](admin: Admin,
                                                    topic: String,
-                                                   totalPartitionCount: Int,
-                                                   brokersToValidate: Seq[B]
+                                                   totalPartitionCount: Int
                                                   ): Unit = {
     val newPartitionSet: Map[String, NewPartitions] = Map.apply(topic -> NewPartitions.increaseTo(totalPartitionCount))
     admin.createPartitions(newPartitionSet.asJava)
-
-    if (brokersToValidate.nonEmpty) {
-      // wait until we've propagated all partitions metadata to all brokers
-      val allPartitionsMetadata = waitForAllPartitionsMetadata(brokersToValidate, topic, totalPartitionCount)
-      (0 until totalPartitionCount - 1).foreach(i => {
-        allPartitionsMetadata.get(new TopicPartition(topic, i)).foreach { partitionMetadata =>
-          assertEquals(totalPartitionCount, partitionMetadata.isr.size)
-        }
-      })
-    }
   }
 }
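Beyond being dead code, the removed validation was incorrect in two ways: the loop `(0 until totalPartitionCount - 1)` never checks the last partition, and it asserts that each partition's ISR size equals the topic's partition count, whereas an ISR can be at most as large as the partition's replica set. If such validation were ever reinstated, a corrected sketch might look like the following (hypothetical; the `.replicas` accessor on the returned metadata is an assumption inferred from the removed `.isr` usage):

// Hypothetical replacement for the removed block, not part of this commit.
if (brokersToValidate.nonEmpty) {
  // Wait until the new partition count has propagated to every broker.
  val allPartitionsMetadata = waitForAllPartitionsMetadata(brokersToValidate, topic, totalPartitionCount)
  (0 until totalPartitionCount).foreach { i => // include the last partition
    allPartitionsMetadata.get(new TopicPartition(topic, i)).foreach { partitionMetadata =>
      // A healthy ISR matches the replica set; its size is bounded by the
      // replication factor, never by the topic's partition count.
      assertEquals(partitionMetadata.replicas.size, partitionMetadata.isr.size)
    }
  }
}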