diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java
index ae3e20d2a8f..855882c9e69 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java
@@ -188,6 +188,7 @@ public class RebalanceSourceConnectorsIntegrationTest {
     }
 
     @Test
+    @Ignore // TODO: To be re-enabled once we can make it less flaky (KAFKA-8391)
     public void testDeleteConnector() throws Exception {
         // create test topic
         connect.kafka().createTopic(TOPIC_NAME, NUM_TOPIC_PARTITIONS);
@@ -269,8 +270,7 @@ public class RebalanceSourceConnectorsIntegrationTest {
                 WORKER_SETUP_DURATION_MS, "Connect and tasks are imbalanced between the workers.");
     }
 
-    // should enable it after KAFKA-12495 fixed
-    @Ignore
+    @Ignore // TODO: To be re-enabled once we can make it less flaky (KAFKA-12495, KAFKA-12283)
     @Test
     public void testMultipleWorkersRejoining() throws Exception {
         // create test topic
diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala
index f675ef5314a..b3afc507582 100644
--- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala
+++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala
@@ -211,6 +211,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging {
   }
 
   @Test
+  @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-7540)
   def testClose(): Unit = {
     val numRecords = 10
     val producer = createProducer()
@@ -299,6 +300,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging {
    * Then, 1 consumer should be left out of the group.
    */
   @Test
+  @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-13421)
   def testRollingBrokerRestartsWithSmallerMaxGroupSizeConfigDisruptsBigGroup(): Unit = {
     val group = "group-max-size-test"
     val topic = "group-max-size-test"
diff --git a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala
index 0cc58628103..1d64106f096 100644
--- a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala
+++ b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala
@@ -552,6 +552,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
   }
 
   @Test
+  @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-6527)
   def testDefaultTopicConfig(): Unit = {
     val (producerThread, consumerThread) = startProduceConsume(retries = 0)
 
@@ -665,6 +666,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
   }
 
   @Test
+  @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-8280)
   def testUncleanLeaderElectionEnable(): Unit = {
     val controller = servers.find(_.config.brokerId == TestUtils.waitUntilControllerElected(zkClient)).get
     val controllerId = controller.config.brokerId
@@ -726,6 +728,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
   }
 
   @Test
+  @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-13672)
   def testThreadPoolResize(): Unit = {
     val requestHandlerPrefix = "data-plane-kafka-request-handler-"
     val networkThreadPrefix = "data-plane-kafka-network-thread-"
@@ -864,6 +867,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
   }
 
   @Test
+  @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-7957)
   def testMetricsReporterUpdate(): Unit = {
     // Add a new metrics reporter
     val newProps = new Properties
diff --git a/core/src/test/scala/unit/kafka/admin/LeaderElectionCommandTest.scala b/core/src/test/scala/unit/kafka/admin/LeaderElectionCommandTest.scala
index 4a2a401655f..e9efdabe8ef 100644
--- a/core/src/test/scala/unit/kafka/admin/LeaderElectionCommandTest.scala
+++ b/core/src/test/scala/unit/kafka/admin/LeaderElectionCommandTest.scala
@@ -32,7 +32,7 @@ import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.errors.UnknownTopicOrPartitionException
 import org.junit.jupiter.api.Assertions._
 import org.junit.jupiter.api.extension.ExtendWith
-import org.junit.jupiter.api.{BeforeEach, Tag}
+import org.junit.jupiter.api.{BeforeEach, Disabled, Tag}
 
 @ExtendWith(value = Array(classOf[ClusterTestExtensions]))
 @ClusterTestDefaults(clusterType = Type.BOTH, brokers = 3)
@@ -86,6 +86,7 @@ final class LeaderElectionCommandTest(cluster: ClusterInstance) {
   }
 
   @ClusterTest
+  @Disabled // TODO: To be re-enabled once KAFKA-8541 is fixed
   def testTopicPartition(): Unit = {
     val client = cluster.createAdminClient()
     val topic = "unclean-topic"
@@ -119,6 +120,7 @@ final class LeaderElectionCommandTest(cluster: ClusterInstance) {
   }
 
   @ClusterTest
+  @Disabled // TODO: To be re-enabled once KAFKA-8785 is fixed
   def testPathToJsonFile(): Unit = {
     val client = cluster.createAdminClient()
     val topic = "unclean-topic"
@@ -153,6 +155,7 @@ final class LeaderElectionCommandTest(cluster: ClusterInstance) {
   }
 
   @ClusterTest
+  @Disabled // TODO: To be re-enabled once KAFKA-13737 is fixed
   def testPreferredReplicaElection(): Unit = {
     val client = cluster.createAdminClient()
     val topic = "preferred-topic"
diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
index 849646cb644..33d15ad10b5 100644
--- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
+++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala
@@ -1389,6 +1389,7 @@ class SocketServerTest {
    * buffered receive.
    */
   @Test
+  @Disabled // TODO: To be re-enabled once KAFKA-13735 is fixed
   def remoteCloseWithoutBufferedReceives(): Unit = {
     verifyRemoteCloseWithBufferedReceives(numComplete = 0, hasIncomplete = false)
   }
@@ -1426,6 +1427,7 @@ class SocketServerTest {
   * The channel must be closed after pending receives are processed.
   */
   @Test
+  @Disabled // TODO: To be re-enabled once KAFKA-13736 is fixed
   def closingChannelWithBufferedReceives(): Unit = {
     verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = false, makeClosing = true)
   }