mirror of https://github.com/apache/kafka.git
KAFKA-1836 metadata.fetch.timeout.ms set to zero blocks forever; reviewed by Neha Narkhede and Ewen Cheslack-Postava
parent b1b80860a0
commit ad4883a0cd
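The root cause is a Java concurrency detail: Object.wait(0) means "wait with no timeout", so the old awaitUpdate() loop, which passed its remaining wait time straight to wait(), parked the caller indefinitely whenever metadata.fetch.timeout.ms was 0. A minimal standalone sketch (not part of this patch) illustrating that behavior:

    public final class WaitZeroDemo {
        public static void main(String[] args) throws InterruptedException {
            final Object lock = new Object();
            synchronized (lock) {
                // A positive timeout returns once it elapses (or earlier on a notify/spurious wakeup).
                lock.wait(10);
                // wait(0) would instead block until some other thread calls lock.notify(),
                // which is exactly what a zero maxWaitMs caused inside awaitUpdate().
                // lock.wait(0);   // left commented out so the demo terminates
            }
            System.out.println("wait(10) returned");
        }
    }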
clients/src/main/java/org/apache/kafka/clients/producer/internals/Metadata.java
@@ -25,7 +25,7 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class is shared by the client thread (for partitioning) and the background sender thread.
  *
- * Metadata is maintained for only a subset of topics, which can be added to over time. When we request metdata for a
+ * Metadata is maintained for only a subset of topics, which can be added to over time. When we request metadata for a
  * topic we don't have any metadata for it will trigger a metadata update.
  */
 public final class Metadata {
@@ -99,12 +99,17 @@ public final class Metadata {
     /**
      * Wait for metadata update until the current version is larger than the last version we know of
      */
-    public synchronized void awaitUpdate(int lastVerison, long maxWaitMs) {
+    public synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs) {
+        if (maxWaitMs < 0) {
+            throw new IllegalArgumentException("Max time to wait for metadata updates should not be < 0 milli seconds");
+        }
         long begin = System.currentTimeMillis();
         long remainingWaitMs = maxWaitMs;
-        while (this.version <= lastVerison) {
+        while (this.version <= lastVersion) {
             try {
-                wait(remainingWaitMs);
+                if (remainingWaitMs != 0) {
+                    wait(remainingWaitMs);
+                }
             } catch (InterruptedException e) { /* this is fine */
             }
             long elapsed = System.currentTimeMillis() - begin;
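With the argument check and the remainingWaitMs guard above, a zero timeout no longer reaches wait(0): the loop skips the wait and the elapsed-time check that follows ends the call with a TimeoutException, which is the behavior the new test below asserts. A small usage sketch, assuming the two-argument Metadata(refreshBackoffMs, metadataExpireMs) constructor used elsewhere in MetadataTest and no background sender thread delivering an update:

    import org.apache.kafka.clients.producer.internals.Metadata;
    import org.apache.kafka.common.Cluster;
    import org.apache.kafka.common.errors.TimeoutException;

    public class AwaitUpdateZeroTimeout {
        public static void main(String[] args) {
            // (refreshBackoffMs, metadataExpireMs) constructor, assumed to mirror MetadataTest
            Metadata metadata = new Metadata(100L, 60 * 60 * 1000L);
            metadata.update(Cluster.empty(), 0L);
            try {
                // requestUpdate() returns the current version; with maxWaitMs = 0 the call
                // now fails fast instead of parking in wait(0)
                metadata.awaitUpdate(metadata.requestUpdate(), 0L);
            } catch (TimeoutException e) {
                System.out.println("timed out as expected: " + e.getMessage());
            }
        }
    }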
clients/src/test/java/org/apache/kafka/clients/producer/MetadataTest.java
@@ -12,15 +12,14 @@
  */
 package org.apache.kafka.clients.producer;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.kafka.clients.producer.internals.Metadata;
 import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.errors.TimeoutException;
 import org.apache.kafka.test.TestUtils;
 import org.junit.Test;
 
+import static org.junit.Assert.*;
+
 public class MetadataTest {
 
     private long refreshBackoffMs = 100;
@@ -49,6 +48,35 @@ public class MetadataTest {
         assertTrue("Update needed due to stale metadata.", metadata.timeToNextUpdate(time) == 0);
     }
 
+    /**
+     * Tests that {@link org.apache.kafka.clients.producer.internals.Metadata#awaitUpdate(int, long)} doesn't
+     * wait forever with a max timeout value of 0
+     *
+     * @throws Exception
+     * @see https://issues.apache.org/jira/browse/KAFKA-1836
+     */
+    @Test
+    public void testMetadataUpdateWaitTime() throws Exception {
+        long time = 0;
+        metadata.update(Cluster.empty(), time);
+        assertFalse("No update needed.", metadata.timeToNextUpdate(time) == 0);
+        // first try with a max wait time of 0 and ensure that this returns back without waiting forever
+        try {
+            metadata.awaitUpdate(metadata.requestUpdate(), 0);
+            fail("Wait on metadata update was expected to timeout, but it didn't");
+        } catch (TimeoutException te) {
+            // expected
+        }
+        // now try with a higher timeout value once
+        final long TWO_SECOND_WAIT = 2000;
+        try {
+            metadata.awaitUpdate(metadata.requestUpdate(), TWO_SECOND_WAIT);
+            fail("Wait on metadata update was expected to timeout, but it didn't");
+        } catch (TimeoutException te) {
+            // expected
+        }
+    }
+
     private Thread asyncFetch(final String topic) {
         Thread thread = new Thread() {
             public void run() {
@@ -64,5 +92,4 @@ public class MetadataTest {
         thread.start();
         return thread;
     }
-
 }
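For completeness, the configuration named in the commit title belongs to the new Java producer, whose send() path waits on this Metadata instance before dispatching a record. A hypothetical producer setup (broker address, serializers, and topic name are placeholders) that exercises the fixed setting:

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class ZeroTimeoutProducerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
            props.put("metadata.fetch.timeout.ms", "0");         // the setting from the commit title
            props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props);
            try {
                // Before this commit a zero metadata timeout could leave send() blocked forever
                // waiting for topic metadata; with the fix the wait gives up immediately.
                producer.send(new ProducerRecord<byte[], byte[]>("test-topic", "hello".getBytes()));
            } finally {
                producer.close();
            }
        }
    }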