mirror of https://github.com/apache/kafka.git
MINOR: Deflake EligibleLeaderReplicasIntegrationTest (#18923)
Make sure to give enough time for the partition ISR updates.

Reviewers: David Jacot <djacot@confluent.io>
This commit is contained in:
parent c89fd2bff6
commit 1eecd02ce8
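Note: the deflaking approach here is the usual one for eventually-consistent broker metadata. Instead of asserting on ISR/ELR/leader state immediately after a broker restart, the test polls the Admin client until the expected state is reached or a deadline expires. The sketch below shows that poll-until-true pattern in isolation; the class, helper signature and timeout values are illustrative stand-ins, not Kafka's actual TestUtils.waitUntilTrue / DEFAULT_MAX_WAIT_MS utilities that the diff below uses.

import java.util.function.Supplier;

public final class WaitUntilSketch {

    // Poll `condition` every `pauseMs` until it returns true or `maxWaitMs` elapses,
    // then fail with `message`. Mirrors the shape of the
    // TestUtils.waitUntilTrue(condition, message, maxWaitMs, pause) calls in the diff,
    // but is a hypothetical stand-in, not the project's utility.
    static void waitUntilTrue(Supplier<Boolean> condition, Supplier<String> message,
                              long maxWaitMs, long pauseMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + maxWaitMs;
        while (System.currentTimeMillis() < deadline) {
            if (Boolean.TRUE.equals(condition.get())) {
                return; // condition reached in time: no flake
            }
            Thread.sleep(pauseMs);
        }
        throw new AssertionError(message.get());
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        // A condition that only becomes true after ~300 ms, standing in for partition
        // metadata that needs a moment to propagate after a broker restart.
        waitUntilTrue(() -> System.currentTimeMillis() - start > 300,
                      () -> "condition was not reached in time",
                      15_000L, 100L);
        System.out.println("condition reached");
    }
}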
@@ -70,6 +70,7 @@ import scala.collection.JavaConverters;
 import scala.collection.Seq;
 import scala.collection.mutable.HashMap;
 
+import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -202,7 +203,7 @@ public class EligibleLeaderReplicasIntegrationTest extends KafkaServerTestHarness {
     }
 
     void waitUntilOneMessageIsConsumed(Consumer consumer) {
-        kafka.utils.TestUtils.waitUntilTrue(
+        TestUtils.waitUntilTrue(
             () -> {
                 try {
                     ConsumerRecords record = consumer.poll(Duration.ofMillis(100L));
@@ -212,7 +213,7 @@ public class EligibleLeaderReplicasIntegrationTest extends KafkaServerTestHarness {
                 }
             },
             () -> "fail to consume messages",
-            org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS, 100L
+            DEFAULT_MAX_WAIT_MS, 100L
         );
     }
 
@@ -417,30 +418,39 @@ public class EligibleLeaderReplicasIntegrationTest extends KafkaServerTestHarness {
             waitForIsrAndElr((isrSize, elrSize) -> {
                 return isrSize > 0 && elrSize == 0;
             });
-            topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName))
-                .allTopicNames().get().get(testTopicName).partitions().get(0);
-            assertEquals(0, topicPartitionInfo.lastKnownElr().size());
-            assertEquals(0, topicPartitionInfo.elr().size());
-            assertEquals(lastKnownLeader, topicPartitionInfo.leader().id());
+            TestUtils.waitUntilTrue(
+                () -> {
+                    try {
+                        TopicPartitionInfo partition = adminClient.describeTopics(Collections.singletonList(testTopicName))
+                            .allTopicNames().get().get(testTopicName).partitions().get(0);
+                        if (partition.leader() == null) return false;
+                        return partition.lastKnownElr().isEmpty() && partition.elr().isEmpty() && partition.leader().id() == lastKnownLeader;
+                    } catch (Exception e) {
+                        return false;
+                    }
+                },
+                () -> String.format("Partition metadata for %s is not correct", testTopicName),
+                DEFAULT_MAX_WAIT_MS, 100L
+            );
         } finally {
             restartDeadBrokers(false);
         }
     }
 
     void waitForIsrAndElr(BiFunction<Integer, Integer, Boolean> isIsrAndElrSizeSatisfied) {
-        kafka.utils.TestUtils.waitUntilTrue(
+        TestUtils.waitUntilTrue(
             () -> {
                 try {
                     TopicDescription topicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName))
                         .allTopicNames().get().get(testTopicName);
                     TopicPartitionInfo partition = topicDescription.partitions().get(0);
-                    if (!isIsrAndElrSizeSatisfied.apply(partition.isr().size(), partition.elr().size())) return false;
+                    return isIsrAndElrSizeSatisfied.apply(partition.isr().size(), partition.elr().size());
                 } catch (Exception e) {
                     return false;
                 }
-                return true;
             },
             () -> String.format("Partition metadata for %s is not propagated", testTopicName),
-            org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS, 100L);
+            DEFAULT_MAX_WAIT_MS, 100L);
     }
 }
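A note on the last hunk: the one-shot assertEquals checks become a single boolean predicate, and a still-missing leader or a transient Admin-client exception simply yields false, so the surrounding poll retries instead of failing on the first stale read. A minimal, self-contained sketch of that predicate shape follows; the Snapshot record is a hypothetical stand-in for the fields the test reads from TopicPartitionInfo.

import java.util.List;

public final class ConvergencePredicateSketch {

    // Hypothetical stand-in for the leader/ELR fields read from TopicPartitionInfo.
    record Snapshot(Integer leaderId, List<Integer> elr, List<Integer> lastKnownElr) { }

    // Predicate form of the old assertEquals(...) checks: true only once the
    // partition has fully recovered after the broker restart.
    static boolean converged(Snapshot p, int expectedLeader) {
        if (p.leaderId() == null) return false;                // leader not re-elected yet: keep polling
        return p.elr().isEmpty() && p.lastKnownElr().isEmpty() // ELR and last-known ELR drained
            && p.leaderId() == expectedLeader;                 // leadership back on the expected broker
    }

    public static void main(String[] args) {
        Snapshot recovering = new Snapshot(null, List.of(2), List.of(2));
        Snapshot recovered = new Snapshot(1, List.of(), List.of());
        System.out.println(converged(recovering, 1)); // false: the poll loop would retry
        System.out.println(converged(recovered, 1));  // true: the wait returns and the test proceeds
    }
}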