mirror of https://github.com/apache/kafka.git
MINOR: Fix a few test names (#17788)
Remove or update custom display names so that the test method name is always included as the first part of the display name. Reviewers: Chia-Ping Tsai <chia7712@gmail.com>, Bill Bejeck <bill@confluent.io>
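For context, the pattern applied throughout this commit relies on JUnit 5's "{displayName}" placeholder for parameterized tests and on assertion messages instead of @DisplayName annotations. Below is a minimal sketch using a hypothetical ExampleTest class (not part of this commit) that illustrates why the test method name then stays visible in reports:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.ValueSource;

    class ExampleTest {

        // A fully custom name such as @ParameterizedTest(name = "count = {0}") hides the
        // method name, so reports only show "count = 1", "count = 2", ...
        // "{displayName}" expands to the method's default display name ("roundTripCount(int)"
        // here), keeping the test method as the first part of each invocation's name.
        @ParameterizedTest(name = "{displayName}.count = {0}")
        @ValueSource(ints = {1, 2, 3})
        void roundTripCount(final int count) {
            // Explanatory text goes into the assertion message rather than a @DisplayName
            // annotation, so failures stay descriptive without renaming the test.
            assertEquals(count, Integer.parseInt(Integer.toString(count)),
                    "count should survive a round trip through String");
        }
    }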
This commit is contained in:
parent 6fc6e87382
commit 48ff6a6b53
@@ -103,7 +103,7 @@ public class RangeAssignorTest {
         assertTrue(assignment.get(consumer1).isEmpty());
     }
 
-    @ParameterizedTest(name = "rackConfig = {0}")
+    @ParameterizedTest(name = "{displayName}.rackConfig = {0}")
     @EnumSource(RackConfig.class)
     public void testOneConsumerOneTopic(RackConfig rackConfig) {
         initializeRacks(rackConfig);
@@ -21,7 +21,6 @@ import org.apache.kafka.common.utils.MockTime;
 import org.apache.kafka.common.utils.Time;
 
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 
 import java.util.List;
@@ -41,7 +40,6 @@ class SampledStatTest {
     }
 
     @Test
-    @DisplayName("Sample should be purged if doesn't overlap the window")
     public void testSampleIsPurgedIfDoesntOverlap() {
         MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2);
 
@@ -50,11 +48,10 @@ class SampledStatTest {
         time.sleep(2500);
 
         double numSamples = stat.measure(config, time.milliseconds());
-        assertEquals(0, numSamples);
+        assertEquals(0, numSamples, "Sample should be purged if doesn't overlap the window");
     }
 
     @Test
-    @DisplayName("Sample should be kept if overlaps the window")
     public void testSampleIsKeptIfOverlaps() {
         MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2);
 
@@ -63,11 +60,10 @@ class SampledStatTest {
         time.sleep(1500);
 
         double numSamples = stat.measure(config, time.milliseconds());
-        assertEquals(1, numSamples);
+        assertEquals(1, numSamples, "Sample should be kept if overlaps the window");
     }
 
     @Test
-    @DisplayName("Sample should be kept if overlaps the window and is n+1")
     public void testSampleIsKeptIfOverlapsAndExtra() {
         MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2);
 
@@ -80,7 +76,7 @@ class SampledStatTest {
         stat.record(config, 1, time.milliseconds());
 
         double numSamples = stat.measure(config, time.milliseconds());
-        assertEquals(3, numSamples);
+        assertEquals(3, numSamples, "Sample should be kept if overlaps the window and is n+1");
     }
 
     // Creates a sample with events at the start and at the end. Positions clock at the end.
@@ -76,7 +76,7 @@ public class SslVersionsTransportLayerTest {
      * Tests that connection success with the default TLS version.
      * Note that debug mode for javax.net.ssl can be enabled via {@code System.setProperty("javax.net.debug", "ssl:handshake");}
      */
-    @ParameterizedTest(name = "tlsServerProtocol = {0}, tlsClientProtocol = {1}")
+    @ParameterizedTest(name = "testTlsDefaults(tlsServerProtocol = {0}, tlsClientProtocol = {1})")
     @MethodSource("parameters")
     public void testTlsDefaults(List<String> serverProtocols, List<String> clientProtocols) throws Exception {
         // Create certificates for use by client and server. Add server cert to client truststore and vice versa.
@@ -86,7 +86,7 @@ public class LazyDownConversionRecordsTest {
      * than the number of bytes we get after conversion. This causes overflow message batch(es) to be appended towards the
      * end of the converted output.
      */
-    @ParameterizedTest(name = "compressionType={0}, toMagic={1}, overflow={2}")
+    @ParameterizedTest
     @MethodSource("parameters")
     public void testConversion(CompressionType compressionType, byte toMagic, boolean overflow) throws IOException {
         doTestConversion(compressionType, toMagic, overflow);
@@ -69,7 +69,7 @@ public class RemoteLogMetadataCacheTest {
         }
     }
 
-    @ParameterizedTest(name = "isInitialized={0}")
+    @ParameterizedTest
     @ValueSource(booleans = {true, false})
     public void testCacheUpdateMetadataOnInvalidArgs(boolean isInitialized) {
         if (isInitialized) {
@@ -55,7 +55,6 @@ import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.TestInfo;
@@ -148,8 +147,8 @@ public class KafkaStreamsTelemetryIntegrationTest {
 
     @ParameterizedTest
     @ValueSource(strings = {"INFO", "DEBUG", "TRACE"})
-    @DisplayName("End-to-end test validating metrics pushed to broker")
     public void shouldPushMetricsToBroker(final String recordingLevel) throws Exception {
+        // End-to-end test validating metrics pushed to broker
         streamsApplicationProperties = props(true);
         streamsApplicationProperties.put(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, recordingLevel);
         final Topology topology = simpleTopology();
@@ -202,8 +201,8 @@ public class KafkaStreamsTelemetryIntegrationTest {
 
     @ParameterizedTest
     @MethodSource("singleAndMultiTaskParameters")
-    @DisplayName("Streams metrics should get passed to Admin and Consumer")
     public void shouldPassMetrics(final String topologyType, final boolean stateUpdaterEnabled) throws Exception {
+        // Streams metrics should get passed to Admin and Consumer
         streamsApplicationProperties = props(stateUpdaterEnabled);
         final Topology topology = topologyType.equals("simple") ? simpleTopology() : complexTopology();
 
@@ -232,8 +231,8 @@ public class KafkaStreamsTelemetryIntegrationTest {
 
     @ParameterizedTest
     @MethodSource("multiTaskParameters")
-    @DisplayName("Correct streams metrics should get passed with dynamic membership")
     public void shouldPassCorrectMetricsDynamicInstances(final boolean stateUpdaterEnabled) throws Exception {
+        // Correct streams metrics should get passed with dynamic membership
         streamsApplicationProperties = props(stateUpdaterEnabled);
         streamsApplicationProperties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(appId).getPath() + "-ks1");
         streamsApplicationProperties.put(StreamsConfig.CLIENT_ID_CONFIG, appId + "-ks1");
@@ -324,8 +323,8 @@ public class KafkaStreamsTelemetryIntegrationTest {
     }
 
     @Test
-    @DisplayName("Streams metrics should not be visible in client metrics")
     public void passedMetricsShouldNotLeakIntoClientMetrics() throws Exception {
+        // Streams metrics should not be visible in client metrics
         streamsApplicationProperties = props(true);
         final Topology topology = complexTopology();
 
@@ -26,7 +26,6 @@ import org.apache.kafka.common.utils.Time;
 
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 
 import java.util.Arrays;
@@ -75,26 +74,25 @@ class StreamsClientMetricsDelegatingReporterTest {
     }
 
     @Test
-    @DisplayName("Should register metrics from init method")
     public void shouldInitMetrics() {
         final List<KafkaMetric> metrics = Arrays.asList(streamClientMetricOne, streamClientMetricTwo, streamClientMetricThree, kafkaMetricWithThreadIdTag);
         streamsClientMetricsDelegatingReporter.init(metrics);
         final List<KafkaMetric> expectedMetrics = Arrays.asList(streamClientMetricOne, streamClientMetricTwo, streamClientMetricThree);
-        assertEquals(expectedMetrics, mockAdminClient.addedMetrics());
+        assertEquals(expectedMetrics, mockAdminClient.addedMetrics(),
+                "Should register metrics from init method");
     }
 
     @Test
-    @DisplayName("Should register client instance metrics only")
     public void shouldRegisterCorrectMetrics() {
         streamsClientMetricsDelegatingReporter.metricChange(kafkaMetricWithThreadIdTag);
         assertEquals(0, mockAdminClient.addedMetrics().size());
 
         streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricOne);
-        assertEquals(1, mockAdminClient.addedMetrics().size());
+        assertEquals(1, mockAdminClient.addedMetrics().size(),
+                "Should register client instance metrics only");
     }
 
     @Test
-    @DisplayName("Should remove client instance metrics")
     public void metricRemoval() {
         streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricOne);
         streamsClientMetricsDelegatingReporter.metricChange(streamClientMetricTwo);
@@ -102,6 +100,7 @@ class StreamsClientMetricsDelegatingReporterTest {
         assertEquals(3, mockAdminClient.addedMetrics().size());
 
         streamsClientMetricsDelegatingReporter.metricRemoval(streamClientMetricOne);
-        assertEquals(2, mockAdminClient.addedMetrics().size());
+        assertEquals(2, mockAdminClient.addedMetrics().size(),
+                "Should remove client instance metrics");
     }
 }
@@ -27,7 +27,6 @@ import org.apache.kafka.common.utils.Time;
 
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 
 import java.util.Arrays;
@@ -84,23 +83,22 @@ class StreamsThreadMetricsDelegatingReporterTest {
 
 
     @Test
-    @DisplayName("Init method should register metrics it receives as parameters")
     public void shouldInitMetrics() {
         final List<KafkaMetric> allMetrics = Arrays.asList(kafkaMetricOneHasThreadIdTag, kafkaMetricTwoHasThreadIdTag, kafkaMetricThreeHasThreadIdTag);
         final List<KafkaMetric> expectedMetrics = Arrays.asList(kafkaMetricOneHasThreadIdTag, kafkaMetricTwoHasThreadIdTag, kafkaMetricThreeHasThreadIdTag);
         streamsThreadMetricsDelegatingReporter.init(allMetrics);
-        assertEquals(expectedMetrics, mockConsumer.addedMetrics());
+        assertEquals(expectedMetrics, mockConsumer.addedMetrics(),
+                "Init method should register metrics it receives as parameters");
     }
 
     @Test
-    @DisplayName("Should register metrics with thread-id in tag map")
     public void shouldRegisterMetrics() {
         streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricOneHasThreadIdTag);
-        assertEquals(kafkaMetricOneHasThreadIdTag, mockConsumer.addedMetrics().get(0));
+        assertEquals(kafkaMetricOneHasThreadIdTag, mockConsumer.addedMetrics().get(0),
+                "Should register metrics with thread-id in tag map");
     }
 
     @Test
-    @DisplayName("Should remove metrics")
     public void shouldRemoveMetrics() {
         streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricOneHasThreadIdTag);
         streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricTwoHasThreadIdTag);
@@ -109,13 +107,14 @@ class StreamsThreadMetricsDelegatingReporterTest {
         assertEquals(expected, mockConsumer.addedMetrics());
         streamsThreadMetricsDelegatingReporter.metricRemoval(kafkaMetricOneHasThreadIdTag);
         expected = Arrays.asList(kafkaMetricTwoHasThreadIdTag, kafkaMetricThreeHasThreadIdTag);
-        assertEquals(expected, mockConsumer.addedMetrics());
+        assertEquals(expected, mockConsumer.addedMetrics(),
+                "Should remove metrics");
     }
 
     @Test
-    @DisplayName("Should not register metrics without thread-id tag")
     public void shouldNotRegisterMetricsWithoutThreadIdTag() {
         streamsThreadMetricsDelegatingReporter.metricChange(kafkaMetricWithoutThreadIdTag);
-        assertEquals(0, mockConsumer.addedMetrics().size());
+        assertEquals(0, mockConsumer.addedMetrics().size(),
+                "Should not register metrics without thread-id tag");
     }
 }
@@ -34,7 +34,6 @@ import org.apache.kafka.streams.state.StoreBuilder;
 import org.apache.kafka.streams.state.Stores;
 
 import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
@@ -50,13 +49,11 @@ import static java.util.Arrays.asList;
 public class KStreamNewProcessorApiTest {
 
     @Test
-    @DisplayName("Should attach the state store using ConnectedStoreProvider")
     void shouldGetStateStoreWithConnectedStoreProvider() {
         runTest(false);
     }
 
     @Test
-    @DisplayName("Should attach the state store StreamBuilder.addStateStore")
     void shouldGetStateStoreWithStreamBuilder() {
         runTest(true);
     }
@@ -19,7 +19,6 @@ package org.apache.kafka.streams.query;
 import org.apache.kafka.streams.query.internals.SucceededQueryResult;
 
 import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
 
 import static org.hamcrest.MatcherAssert.assertThat;
@@ -40,26 +39,24 @@ class StateQueryResultTest {
     }
 
     @Test
-    @DisplayName("Zero query results shouldn't error")
     void getOnlyPartitionResultNoResultsTest() {
         stringStateQueryResult.addResult(0, noResultsFound);
         final QueryResult<String> result = stringStateQueryResult.getOnlyPartitionResult();
-        assertThat(result, nullValue());
+        assertThat("Zero query results shouldn't error", result, nullValue());
     }
 
     @Test
-    @DisplayName("Valid query results still works")
     void getOnlyPartitionResultWithSingleResultTest() {
         stringStateQueryResult.addResult(0, validResult);
         final QueryResult<String> result = stringStateQueryResult.getOnlyPartitionResult();
-        assertThat(result.getResult(), is("Foo"));
+        assertThat("Valid query results still works", result.getResult(), is("Foo"));
     }
 
     @Test
-    @DisplayName("More than one query result throws IllegalArgumentException ")
     void getOnlyPartitionResultMultipleResults() {
         stringStateQueryResult.addResult(0, validResult);
         stringStateQueryResult.addResult(1, validResult);
-        assertThrows(IllegalArgumentException.class, () -> stringStateQueryResult.getOnlyPartitionResult());
+        assertThrows(IllegalArgumentException.class, () -> stringStateQueryResult.getOnlyPartitionResult(),
+                "More than one query result throws IllegalArgumentException");
     }
 }
@@ -155,7 +155,7 @@ public class MockProcessorContextStateStoreTest {
         return values.stream();
     }
 
-    @ParameterizedTest(name = "builder = {0}, timestamped = {1}, caching = {2}, logging = {3}")
+    @ParameterizedTest
     @MethodSource(value = "parameters")
     public void shouldEitherInitOrThrow(final StoreBuilder<StateStore> builder,
                                         final boolean timestamped,