mirror of https://github.com/apache/kafka.git
MINOR: Cleanup Tools Module (2/n) (#20096)
Now that Kafka supports Java 17, this PR makes some changes in the tools module. The changes in this PR are limited to only some files; future PRs will follow. The changes mostly include: - Collections.emptyList(), Collections.singletonList() and Arrays.asList() are replaced with List.of() - Collections.emptyMap() and Collections.singletonMap() are replaced with Map.of() - Collections.singleton() is replaced with Set.of() Some minor changes to use the enhanced switch. Sub modules targeted: tools/src/test Reviewers: Chia-Ping Tsai <chia7712@gmail.com>
This commit is contained in:
parent
a12d38f091
commit
b9413ea4d6
|
@ -51,12 +51,10 @@ import java.io.BufferedWriter;
|
|||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.Set;
|
||||
|
||||
import static java.time.Duration.ofMillis;
|
||||
import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForEmptyConsumerGroup;
|
||||
|
@ -176,7 +174,7 @@ public abstract class AbstractResetIntegrationTest {
|
|||
}
|
||||
|
||||
private void add10InputElements() {
|
||||
final List<KeyValue<Long, String>> records = Arrays.asList(KeyValue.pair(0L, "aaa"),
|
||||
final List<KeyValue<Long, String>> records = List.of(KeyValue.pair(0L, "aaa"),
|
||||
KeyValue.pair(1L, "bbb"),
|
||||
KeyValue.pair(0L, "ccc"),
|
||||
KeyValue.pair(1L, "ddd"),
|
||||
|
@ -189,7 +187,7 @@ public abstract class AbstractResetIntegrationTest {
|
|||
|
||||
for (final KeyValue<Long, String> record : records) {
|
||||
mockTime.sleep(10);
|
||||
IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(INPUT_TOPIC, Collections.singleton(record), producerConfig, mockTime.milliseconds());
|
||||
IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(INPUT_TOPIC, Set.of(record), producerConfig, mockTime.milliseconds());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -211,7 +209,7 @@ public abstract class AbstractResetIntegrationTest {
|
|||
|
||||
final List<String> internalTopics = cluster.getAllTopicsInCluster().stream()
|
||||
.filter(StreamsResetter::matchesInternalTopicFormat)
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
cleanGlobal(false,
|
||||
"--internal-topics",
|
||||
String.join(",", internalTopics.subList(1, internalTopics.size())),
|
||||
|
@ -288,7 +286,7 @@ public abstract class AbstractResetIntegrationTest {
|
|||
if (!useRepartitioned) {
|
||||
IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(
|
||||
INTERMEDIATE_USER_TOPIC,
|
||||
Collections.singleton(badMessage),
|
||||
Set.of(badMessage),
|
||||
producerConfig,
|
||||
mockTime.milliseconds());
|
||||
}
|
||||
|
@ -375,7 +373,7 @@ public abstract class AbstractResetIntegrationTest {
|
|||
final String resetScenarioArg,
|
||||
final String appID) throws Exception {
|
||||
final List<String> parameterList = new ArrayList<>(
|
||||
Arrays.asList("--application-id", appID,
|
||||
List.of("--application-id", appID,
|
||||
"--bootstrap-server", cluster.bootstrapServers(),
|
||||
"--input-topics", INPUT_TOPIC
|
||||
));
|
||||
|
|
|
@ -31,8 +31,8 @@ import org.apache.kafka.common.test.api.ClusterTestDefaults;
|
|||
import org.apache.kafka.server.config.ServerConfigs;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
@ -56,7 +56,7 @@ public class BrokerApiVersionsCommandTest {
|
|||
|
||||
NodeApiVersions nodeApiVersions = new NodeApiVersions(
|
||||
ApiVersionsResponse.filterApis(listenerType, true, true),
|
||||
Collections.emptyList());
|
||||
List.of());
|
||||
Iterator<ApiKeys> apiKeysIter = ApiKeys.clientApis().iterator();
|
||||
while (apiKeysIter.hasNext()) {
|
||||
ApiKeys apiKey = apiKeysIter.next();
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.junit.jupiter.api.Test;
|
|||
import org.mockito.ArgumentCaptor;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
@ -209,7 +208,7 @@ public class ClientMetricsCommandTest {
|
|||
ClientMetricsCommand.ClientMetricsService service = new ClientMetricsCommand.ClientMetricsService(adminClient);
|
||||
|
||||
ConfigResource cr = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, clientMetricsName);
|
||||
Config cfg = new Config(Collections.singleton(new ConfigEntry("metrics", "org.apache.kafka.producer.")));
|
||||
Config cfg = new Config(Set.of(new ConfigEntry("metrics", "org.apache.kafka.producer.")));
|
||||
DescribeConfigsResult describeResult = AdminClientTestUtils.describeConfigsResult(cr, cfg);
|
||||
when(adminClient.describeConfigs(any())).thenReturn(describeResult);
|
||||
AlterConfigsResult alterResult = AdminClientTestUtils.alterConfigsResult(cr);
|
||||
|
@ -237,7 +236,7 @@ public class ClientMetricsCommandTest {
|
|||
ConfigResource.Type.CLIENT_METRICS, Set.of(clientMetricsName)
|
||||
));
|
||||
when(adminClient.listConfigResources(any(), any())).thenReturn(listConfigResourcesResult);
|
||||
Config cfg = new Config(Collections.singleton(new ConfigEntry("metrics", "org.apache.kafka.producer.")));
|
||||
Config cfg = new Config(Set.of(new ConfigEntry("metrics", "org.apache.kafka.producer.")));
|
||||
DescribeConfigsResult describeResult = AdminClientTestUtils.describeConfigsResult(cr, cfg);
|
||||
when(adminClient.describeConfigs(any())).thenReturn(describeResult);
|
||||
|
||||
|
@ -284,7 +283,7 @@ public class ClientMetricsCommandTest {
|
|||
ListConfigResourcesResult result = AdminClientTestUtils.listConfigResourcesResult(clientMetricsName);
|
||||
when(adminClient.listConfigResources(any(), any())).thenReturn(result);
|
||||
ConfigResource cr = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, clientMetricsName);
|
||||
Config cfg = new Config(Collections.singleton(new ConfigEntry("metrics", "org.apache.kafka.producer.")));
|
||||
Config cfg = new Config(Set.of(new ConfigEntry("metrics", "org.apache.kafka.producer.")));
|
||||
DescribeConfigsResult describeResult = AdminClientTestUtils.describeConfigsResult(cr, cfg);
|
||||
when(adminClient.describeConfigs(any())).thenReturn(describeResult);
|
||||
|
||||
|
@ -326,7 +325,7 @@ public class ClientMetricsCommandTest {
|
|||
ListConfigResourcesResult result = AdminClientTestUtils.listConfigResourcesResult(Errors.UNSUPPORTED_VERSION.exception());
|
||||
when(adminClient.listConfigResources(any(), any())).thenReturn(result);
|
||||
|
||||
assertThrows(ExecutionException.class, () -> service.listClientMetrics());
|
||||
assertThrows(ExecutionException.class, service::listClientMetrics);
|
||||
}
|
||||
|
||||
private void assertInitializeInvalidOptionsExitCode(int expected, String[] options) {
|
||||
|
|
|
@ -29,7 +29,6 @@ import java.io.PrintStream;
|
|||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
@ -71,10 +70,10 @@ public class ClusterToolTest {
|
|||
|
||||
@ClusterTest(brokers = 2, types = {Type.KRAFT, Type.CO_KRAFT})
|
||||
public void testListEndpointsArgumentWithBootstrapServer(ClusterInstance clusterInstance) {
|
||||
List<Integer> brokerIds = clusterInstance.brokerIds().stream().collect(Collectors.toList());
|
||||
List<Integer> brokerIds = clusterInstance.brokerIds().stream().toList();
|
||||
clusterInstance.shutdownBroker(brokerIds.get(0));
|
||||
|
||||
List<String> ports = Arrays.stream(clusterInstance.bootstrapServers().split(",")).map(b -> b.split(":")[1]).collect(Collectors.toList());
|
||||
List<String> ports = Arrays.stream(clusterInstance.bootstrapServers().split(",")).map(b -> b.split(":")[1]).toList();
|
||||
String format = "%-10s %-9s %-10s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s %-6s";
|
||||
String expected = String.format(format,
|
||||
"ID", "HOST", "PORT", "RACK", "STATE", "ENDPOINT_TYPE",
|
||||
|
@ -110,7 +109,7 @@ public class ClusterToolTest {
|
|||
int id = clusterInstance.controllerIds().iterator().next();
|
||||
String format = "%-10s %-9s %-10s %-10s %-15s%n%-10s %-9s %-10s %-10s %-10s";
|
||||
String expected = String.format(format, "ID", "HOST", "PORT", "RACK", "ENDPOINT_TYPE", id, "localhost", port, "null", "controller");
|
||||
assertTrue(output.equals(expected));
|
||||
assertEquals(expected, output);
|
||||
}
|
||||
|
||||
@ClusterTest(brokers = 3, types = {Type.KRAFT, Type.CO_KRAFT})
|
||||
|
|
|
@ -52,10 +52,6 @@ import java.util.function.Consumer;
|
|||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static java.util.Collections.singleton;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG;
|
||||
import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG;
|
||||
import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG;
|
||||
|
@ -246,27 +242,27 @@ public class ConfigCommandIntegrationTest {
|
|||
|
||||
try (Admin client = cluster.admin()) {
|
||||
// Add config
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "110000"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.empty(), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "120000"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId), Map.of(MESSAGE_MAX_BYTES_CONFIG, "110000"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.empty(), Map.of(MESSAGE_MAX_BYTES_CONFIG, "120000"), alterOpts);
|
||||
|
||||
// Change config
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "130000"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.empty(), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "140000"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId), Map.of(MESSAGE_MAX_BYTES_CONFIG, "130000"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.empty(), Map.of(MESSAGE_MAX_BYTES_CONFIG, "140000"), alterOpts);
|
||||
|
||||
// Delete config
|
||||
deleteAndVerifyConfigValue(client, defaultBrokerId, singleton(MESSAGE_MAX_BYTES_CONFIG), true, alterOpts);
|
||||
deleteAndVerifyConfigValue(client, defaultBrokerId, Set.of(MESSAGE_MAX_BYTES_CONFIG), true, alterOpts);
|
||||
|
||||
// Listener configs: should work only with listener name
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId),
|
||||
singletonMap("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts);
|
||||
Map.of("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts);
|
||||
// Per-broker config configured at default cluster-level should fail
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.empty(),
|
||||
singletonMap("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts));
|
||||
Map.of("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts));
|
||||
deleteAndVerifyConfigValue(client, defaultBrokerId,
|
||||
singleton("listener.name.internal.ssl.keystore.location"), false, alterOpts);
|
||||
Set.of("listener.name.internal.ssl.keystore.location"), false, alterOpts);
|
||||
alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap("listener.name.external.ssl.keystore.password", "secret"), alterOpts);
|
||||
Map.of("listener.name.external.ssl.keystore.password", "secret"), alterOpts);
|
||||
|
||||
Map<String, String> configs = new HashMap<>();
|
||||
configs.put("listener.name.external.ssl.keystore.password", "secret");
|
||||
|
@ -281,12 +277,11 @@ public class ConfigCommandIntegrationTest {
|
|||
@ClusterTest
|
||||
public void testGroupConfigUpdateUsingKraft() throws Exception {
|
||||
List<String> alterOpts = Stream.concat(entityOp(Optional.of(defaultGroupName)).stream(),
|
||||
Stream.of("--entity-type", "groups", "--alter"))
|
||||
.collect(Collectors.toList());
|
||||
Stream.of("--entity-type", "groups", "--alter")).toList();
|
||||
verifyGroupConfigUpdate(alterOpts);
|
||||
|
||||
// Test for the --group alias
|
||||
verifyGroupConfigUpdate(asList("--group", defaultGroupName, "--alter"));
|
||||
verifyGroupConfigUpdate(List.of("--group", defaultGroupName, "--alter"));
|
||||
}
|
||||
|
||||
@ClusterTest
|
||||
|
@ -343,7 +338,7 @@ public class ConfigCommandIntegrationTest {
|
|||
deleteAndVerifyGroupConfigValue(client, defaultGroupName, configs, alterOpts);
|
||||
|
||||
// Unknown config configured should fail
|
||||
assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, singletonMap("unknown.config", "20000"), alterOpts));
|
||||
assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, Map.of("unknown.config", "20000"), alterOpts));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -351,12 +346,11 @@ public class ConfigCommandIntegrationTest {
|
|||
@ClusterTest(types = {Type.KRAFT})
|
||||
public void testClientMetricsConfigUpdate() throws Exception {
|
||||
List<String> alterOpts = Stream.concat(entityOp(Optional.of(defaultClientMetricsName)).stream(),
|
||||
Stream.of("--entity-type", "client-metrics", "--alter"))
|
||||
.collect(Collectors.toList());
|
||||
Stream.of("--entity-type", "client-metrics", "--alter")).toList();
|
||||
verifyClientMetricsConfigUpdate(alterOpts);
|
||||
|
||||
// Test for the --client-metrics alias
|
||||
verifyClientMetricsConfigUpdate(asList("--client-metrics", defaultClientMetricsName, "--alter"));
|
||||
verifyClientMetricsConfigUpdate(List.of("--client-metrics", defaultClientMetricsName, "--alter"));
|
||||
}
|
||||
|
||||
private void verifyClientMetricsConfigUpdate(List<String> alterOpts) throws Exception {
|
||||
|
@ -373,7 +367,7 @@ public class ConfigCommandIntegrationTest {
|
|||
deleteAndVerifyClientMetricsConfigValue(client, defaultClientMetricsName, configs, alterOpts);
|
||||
|
||||
// Unknown config configured should fail
|
||||
assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, singletonMap("unknown.config", "20000"), alterOpts));
|
||||
assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, Map.of("unknown.config", "20000"), alterOpts));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -384,13 +378,13 @@ public class ConfigCommandIntegrationTest {
|
|||
try (Admin client = cluster.admin()) {
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false"), alterOpts));
|
||||
Map.of(AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false"), alterOpts));
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(AUTO_LEADER_REBALANCE_ENABLE_CONFIG, "false"), alterOpts));
|
||||
Map.of(AUTO_LEADER_REBALANCE_ENABLE_CONFIG, "false"), alterOpts));
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap("broker.id", "1"), alterOpts));
|
||||
Map.of("broker.id", "1"), alterOpts));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -400,11 +394,11 @@ public class ConfigCommandIntegrationTest {
|
|||
|
||||
try (Admin client = cluster.admin()) {
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId),
|
||||
singletonMap("log.flush.interval.messages", "100"), alterOpts);
|
||||
Map.of("log.flush.interval.messages", "100"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId),
|
||||
singletonMap("log.retention.bytes", "20"), alterOpts);
|
||||
Map.of("log.retention.bytes", "20"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId),
|
||||
singletonMap("log.retention.ms", "2"), alterOpts);
|
||||
Map.of("log.retention.ms", "2"), alterOpts);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -415,13 +409,13 @@ public class ConfigCommandIntegrationTest {
|
|||
|
||||
try (Admin client = cluster.admin()) {
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(listenerName + "ssl.truststore.type", "PKCS12"), alterOpts);
|
||||
Map.of(listenerName + "ssl.truststore.type", "PKCS12"), alterOpts);
|
||||
alterAndVerifyConfig(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(listenerName + "ssl.truststore.location", "/temp/test.jks"), alterOpts);
|
||||
Map.of(listenerName + "ssl.truststore.location", "/temp/test.jks"), alterOpts);
|
||||
alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(listenerName + "ssl.truststore.password", "password"), alterOpts);
|
||||
Map.of(listenerName + "ssl.truststore.password", "password"), alterOpts);
|
||||
verifyConfigSecretValue(client, Optional.of(defaultBrokerId),
|
||||
singleton(listenerName + "ssl.truststore.password"));
|
||||
Set.of(listenerName + "ssl.truststore.password"));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -432,13 +426,13 @@ public class ConfigCommandIntegrationTest {
|
|||
try (Admin client = cluster.admin()) {
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12"), alterOpts));
|
||||
Map.of(SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12"), alterOpts));
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(SSL_TRUSTSTORE_LOCATION_CONFIG, "/temp/test.jks"), alterOpts));
|
||||
Map.of(SSL_TRUSTSTORE_LOCATION_CONFIG, "/temp/test.jks"), alterOpts));
|
||||
assertThrows(ExecutionException.class,
|
||||
() -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
|
||||
singletonMap(SSL_TRUSTSTORE_PASSWORD_CONFIG, "password"), alterOpts));
|
||||
Map.of(SSL_TRUSTSTORE_PASSWORD_CONFIG, "password"), alterOpts));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -469,7 +463,7 @@ public class ConfigCommandIntegrationTest {
|
|||
|
||||
@ClusterTest
|
||||
public void testUpdateInvalidTopicConfigs() throws ExecutionException, InterruptedException {
|
||||
List<String> alterOpts = asList("--bootstrap-server", cluster.bootstrapServers(), "--entity-type", "topics", "--alter");
|
||||
List<String> alterOpts = List.of("--bootstrap-server", cluster.bootstrapServers(), "--entity-type", "topics", "--alter");
|
||||
try (Admin client = cluster.admin()) {
|
||||
client.createTopics(List.of(new NewTopic("test-config-topic", 1, (short) 1))).all().get();
|
||||
assertInstanceOf(
|
||||
|
@ -480,7 +474,7 @@ public class ConfigCommandIntegrationTest {
|
|||
client,
|
||||
new ConfigCommand.ConfigCommandOptions(
|
||||
toArray(alterOpts,
|
||||
asList("--add-config", "invalid=2", "--entity-type", "topics", "--entity-name", "test-config-topic"))))
|
||||
List.of("--add-config", "invalid=2", "--entity-type", "topics", "--entity-name", "test-config-topic"))))
|
||||
).getCause()
|
||||
);
|
||||
}
|
||||
|
@ -494,14 +488,14 @@ public class ConfigCommandIntegrationTest {
|
|||
public void testUpdateBrokerConfigNotAffectedByInvalidConfig() {
|
||||
try (Admin client = cluster.admin()) {
|
||||
ConfigCommand.alterConfig(client, new ConfigCommand.ConfigCommandOptions(
|
||||
toArray(asList("--bootstrap-server", cluster.bootstrapServers(),
|
||||
toArray(List.of("--bootstrap-server", cluster.bootstrapServers(),
|
||||
"--alter",
|
||||
"--add-config", "log.cleaner.threadzz=2",
|
||||
"--entity-type", "brokers",
|
||||
"--entity-default"))));
|
||||
|
||||
ConfigCommand.alterConfig(client, new ConfigCommand.ConfigCommandOptions(
|
||||
toArray(asList("--bootstrap-server", cluster.bootstrapServers(),
|
||||
toArray(List.of("--bootstrap-server", cluster.bootstrapServers(),
|
||||
"--alter",
|
||||
"--add-config", "log.cleaner.threads=2",
|
||||
"--entity-type", "brokers",
|
||||
|
@ -531,15 +525,15 @@ public class ConfigCommandIntegrationTest {
|
|||
.incrementalAlterConfigs(anyMap(), any(AlterConfigsOptions.class));
|
||||
assertEquals(
|
||||
"The INCREMENTAL_ALTER_CONFIGS API is not supported by the cluster. The API is supported starting from version 2.3.0. You may want to use an older version of this tool to interact with your cluster, or upgrade your brokers to version 2.3.0 or newer to avoid this error.",
|
||||
assertThrows(UnsupportedVersionException.class, () -> {
|
||||
assertThrows(UnsupportedVersionException.class, () ->
|
||||
ConfigCommand.alterConfig(spyAdmin, new ConfigCommand.ConfigCommandOptions(
|
||||
toArray(asList(
|
||||
toArray(List.of(
|
||||
"--bootstrap-server", cluster.bootstrapServers(),
|
||||
"--alter",
|
||||
"--add-config", "log.cleaner.threads=2",
|
||||
"--entity-type", "brokers",
|
||||
"--entity-default"))));
|
||||
}).getMessage()
|
||||
"--entity-default"))))
|
||||
).getMessage()
|
||||
);
|
||||
Mockito.verify(spyAdmin).incrementalAlterConfigs(anyMap(), any(AlterConfigsOptions.class));
|
||||
}
|
||||
|
@ -585,12 +579,12 @@ public class ConfigCommandIntegrationTest {
|
|||
}
|
||||
|
||||
private List<String> entityOp(Optional<String> entityId) {
|
||||
return entityId.map(id -> asList("--entity-name", id))
|
||||
.orElse(singletonList("--entity-default"));
|
||||
return entityId.map(id -> List.of("--entity-name", id))
|
||||
.orElse(List.of("--entity-default"));
|
||||
}
|
||||
|
||||
private List<String> generateDefaultAlterOpts(String bootstrapServers) {
|
||||
return asList("--bootstrap-server", bootstrapServers,
|
||||
return List.of("--bootstrap-server", bootstrapServers,
|
||||
"--entity-type", "brokers", "--alter");
|
||||
}
|
||||
|
||||
|
@ -620,23 +614,23 @@ public class ConfigCommandIntegrationTest {
|
|||
|
||||
private void alterConfigWithAdmin(Admin client, Optional<String> resourceName, Map<String, String> config, List<String> alterOpts) {
|
||||
String configStr = transferConfigMapToString(config);
|
||||
List<String> bootstrapOpts = quorumArgs().collect(Collectors.toList());
|
||||
List<String> bootstrapOpts = quorumArgs().toList();
|
||||
ConfigCommand.ConfigCommandOptions addOpts =
|
||||
new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts,
|
||||
entityOp(resourceName),
|
||||
alterOpts,
|
||||
asList("--add-config", configStr)));
|
||||
List.of("--add-config", configStr)));
|
||||
addOpts.checkArgs();
|
||||
ConfigCommand.alterConfig(client, addOpts);
|
||||
}
|
||||
|
||||
private void alterConfigWithAdmin(Admin client, Map<String, String> config, List<String> alterOpts) {
|
||||
String configStr = transferConfigMapToString(config);
|
||||
List<String> bootstrapOpts = quorumArgs().collect(Collectors.toList());
|
||||
List<String> bootstrapOpts = quorumArgs().toList();
|
||||
ConfigCommand.ConfigCommandOptions addOpts =
|
||||
new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts,
|
||||
alterOpts,
|
||||
asList("--add-config", configStr)));
|
||||
List.of("--add-config", configStr)));
|
||||
addOpts.checkArgs();
|
||||
ConfigCommand.alterConfig(client, addOpts);
|
||||
}
|
||||
|
@ -675,7 +669,7 @@ public class ConfigCommandIntegrationTest {
|
|||
|
||||
private Stream<ConfigEntry> getConfigEntryStream(Admin client,
|
||||
ConfigResource configResource) throws InterruptedException, ExecutionException {
|
||||
return client.describeConfigs(singletonList(configResource))
|
||||
return client.describeConfigs(List.of(configResource))
|
||||
.all()
|
||||
.get()
|
||||
.values()
|
||||
|
@ -689,8 +683,8 @@ public class ConfigCommandIntegrationTest {
|
|||
boolean hasDefaultValue,
|
||||
List<String> alterOpts) throws Exception {
|
||||
ConfigCommand.ConfigCommandOptions deleteOpts =
|
||||
new ConfigCommand.ConfigCommandOptions(toArray(alterOpts, asList("--entity-name", brokerId),
|
||||
asList("--delete-config", String.join(",", config))));
|
||||
new ConfigCommand.ConfigCommandOptions(toArray(alterOpts, List.of("--entity-name", brokerId),
|
||||
List.of("--delete-config", String.join(",", config))));
|
||||
deleteOpts.checkArgs();
|
||||
ConfigCommand.alterConfig(client, deleteOpts);
|
||||
verifyPerBrokerConfigValue(client, brokerId, config, hasDefaultValue);
|
||||
|
@ -700,11 +694,11 @@ public class ConfigCommandIntegrationTest {
|
|||
String groupName,
|
||||
Map<String, String> defaultConfigs,
|
||||
List<String> alterOpts) throws Exception {
|
||||
List<String> bootstrapOpts = quorumArgs().collect(Collectors.toList());
|
||||
List<String> bootstrapOpts = quorumArgs().toList();
|
||||
ConfigCommand.ConfigCommandOptions deleteOpts =
|
||||
new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts,
|
||||
alterOpts,
|
||||
asList("--delete-config", String.join(",", defaultConfigs.keySet()))));
|
||||
List.of("--delete-config", String.join(",", defaultConfigs.keySet()))));
|
||||
deleteOpts.checkArgs();
|
||||
ConfigCommand.alterConfig(client, deleteOpts);
|
||||
verifyGroupConfig(client, groupName, defaultConfigs);
|
||||
|
@ -714,11 +708,11 @@ public class ConfigCommandIntegrationTest {
|
|||
String clientMetricsName,
|
||||
Map<String, String> defaultConfigs,
|
||||
List<String> alterOpts) throws Exception {
|
||||
List<String> bootstrapOpts = quorumArgs().collect(Collectors.toList());
|
||||
List<String> bootstrapOpts = quorumArgs().toList();
|
||||
ConfigCommand.ConfigCommandOptions deleteOpts =
|
||||
new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts,
|
||||
alterOpts,
|
||||
asList("--delete-config", String.join(",", defaultConfigs.keySet()))));
|
||||
List.of("--delete-config", String.join(",", defaultConfigs.keySet()))));
|
||||
deleteOpts.checkArgs();
|
||||
ConfigCommand.alterConfig(client, deleteOpts);
|
||||
verifyClientMetricsConfig(client, clientMetricsName, defaultConfigs);
|
||||
|
|
|
@ -53,7 +53,6 @@ import org.junit.jupiter.params.provider.ValueSource;
|
|||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.AbstractMap.SimpleImmutableEntry;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
@ -85,8 +84,8 @@ import static org.mockito.Mockito.verify;
|
|||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class ConfigCommandTest {
|
||||
private static final List<String> BROKER_BOOTSTRAP = Arrays.asList("--bootstrap-server", "localhost:9092");
|
||||
private static final List<String> CONTROLLER_BOOTSTRAP = Arrays.asList("--bootstrap-controller", "localhost:9093");
|
||||
private static final List<String> BROKER_BOOTSTRAP = List.of("--bootstrap-server", "localhost:9092");
|
||||
private static final List<String> CONTROLLER_BOOTSTRAP = List.of("--bootstrap-controller", "localhost:9093");
|
||||
|
||||
@Test
|
||||
public void shouldExitWithNonZeroStatusOnArgError() {
|
||||
|
@ -95,7 +94,7 @@ public class ConfigCommandTest {
|
|||
|
||||
@Test
|
||||
public void shouldExitWithNonZeroStatusAlterUserQuotaWithoutEntityName() {
|
||||
assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, Arrays.asList(
|
||||
assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, List.of(
|
||||
"--entity-type", "users",
|
||||
"--alter", "--add-config", "consumer_byte_rate=20000")));
|
||||
}
|
||||
|
@ -110,7 +109,7 @@ public class ConfigCommandTest {
|
|||
|
||||
@Test
|
||||
public void shouldExitWithNonZeroStatusIfBothBootstrapServerAndBootstrapControllerGiven() {
|
||||
assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, CONTROLLER_BOOTSTRAP, Arrays.asList(
|
||||
assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, CONTROLLER_BOOTSTRAP, List.of(
|
||||
"--describe", "--broker-defaults")));
|
||||
}
|
||||
|
||||
|
@ -405,9 +404,9 @@ public class ConfigCommandTest {
|
|||
|
||||
File file = TestUtils.tempFile(fileContents);
|
||||
|
||||
List<String> addConfigFileArgs = Arrays.asList("--add-config-file", file.getPath());
|
||||
List<String> addConfigFileArgs = List.of("--add-config-file", file.getPath());
|
||||
|
||||
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--entity-name", "1",
|
||||
"--entity-type", "brokers",
|
||||
"--alter"),
|
||||
|
@ -423,7 +422,7 @@ public class ConfigCommandTest {
|
|||
}
|
||||
|
||||
public void testExpectedEntityTypeNames(List<String> expectedTypes, List<String> expectedNames, List<String> connectOpts, String... args) {
|
||||
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList(connectOpts.get(0), connectOpts.get(1), "--describe"), Arrays.asList(args)));
|
||||
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(List.of(connectOpts.get(0), connectOpts.get(1), "--describe"), List.of(args)));
|
||||
createOpts.checkArgs();
|
||||
assertEquals(createOpts.entityTypes().toSeq(), seq(expectedTypes));
|
||||
assertEquals(createOpts.entityNames().toSeq(), seq(expectedNames));
|
||||
|
@ -431,31 +430,31 @@ public class ConfigCommandTest {
|
|||
|
||||
@Test
|
||||
public void testOptionEntityTypeNames() {
|
||||
List<String> connectOpts = Arrays.asList("--bootstrap-server", "localhost:9092");
|
||||
List<String> connectOpts = List.of("--bootstrap-server", "localhost:9092");
|
||||
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC.value()), Collections.singletonList("A"), connectOpts, "--entity-type", "topics", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP.value()), Collections.singletonList("1.2.3.4"), connectOpts, "--entity-name", "1.2.3.4", "--entity-type", "ips");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS.value()), Collections.singletonList("A"), connectOpts, "--entity-type", "client-metrics", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP.value()), Collections.singletonList("A"), connectOpts, "--entity-type", "groups", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER.value(), ConfigType.CLIENT.value()), Arrays.asList("A", ""), connectOpts,
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.TOPIC.value()), List.of("A"), connectOpts, "--entity-type", "topics", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.IP.value()), List.of("1.2.3.4"), connectOpts, "--entity-name", "1.2.3.4", "--entity-type", "ips");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.CLIENT_METRICS.value()), List.of("A"), connectOpts, "--entity-type", "client-metrics", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.GROUP.value()), List.of("A"), connectOpts, "--entity-type", "groups", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.USER.value(), ConfigType.CLIENT.value()), List.of("A", ""), connectOpts,
|
||||
"--entity-type", "users", "--entity-type", "clients", "--entity-name", "A", "--entity-default");
|
||||
testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER.value(), ConfigType.CLIENT.value()), Arrays.asList("", "B"), connectOpts,
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.USER.value(), ConfigType.CLIENT.value()), List.of("", "B"), connectOpts,
|
||||
"--entity-default", "--entity-name", "B", "--entity-type", "users", "--entity-type", "clients");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC.value()), Collections.singletonList("A"), connectOpts, "--topic", "A");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP.value()), Collections.singletonList("1.2.3.4"), connectOpts, "--ip", "1.2.3.4");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP.value()), Collections.singletonList("A"), connectOpts, "--group", "A");
|
||||
testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT.value(), ConfigType.USER.value()), Arrays.asList("B", "A"), connectOpts, "--client", "B", "--user", "A");
|
||||
testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT.value(), ConfigType.USER.value()), Arrays.asList("B", ""), connectOpts, "--client", "B", "--user-defaults");
|
||||
testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT.value(), ConfigType.USER.value()), Collections.singletonList("A"), connectOpts,
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.TOPIC.value()), List.of("A"), connectOpts, "--topic", "A");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.IP.value()), List.of("1.2.3.4"), connectOpts, "--ip", "1.2.3.4");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.GROUP.value()), List.of("A"), connectOpts, "--group", "A");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.CLIENT.value(), ConfigType.USER.value()), List.of("B", "A"), connectOpts, "--client", "B", "--user", "A");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.CLIENT.value(), ConfigType.USER.value()), List.of("B", ""), connectOpts, "--client", "B", "--user-defaults");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.CLIENT.value(), ConfigType.USER.value()), List.of("A"), connectOpts,
|
||||
"--entity-type", "clients", "--entity-type", "users", "--entity-name", "A");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC.value()), Collections.emptyList(), connectOpts, "--entity-type", "topics");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP.value()), Collections.emptyList(), connectOpts, "--entity-type", "ips");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP.value()), Collections.emptyList(), connectOpts, "--entity-type", "groups");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS.value()), Collections.emptyList(), connectOpts, "--entity-type", "client-metrics");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER.value()), Collections.singletonList("0"), connectOpts, "--entity-name", "0", "--entity-type", "brokers");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER.value()), Collections.singletonList("0"), connectOpts, "--broker", "0");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.USER.value()), Collections.emptyList(), connectOpts, "--entity-type", "users");
|
||||
testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER.value()), Collections.emptyList(), connectOpts, "--entity-type", "brokers");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.TOPIC.value()), List.of(), connectOpts, "--entity-type", "topics");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.IP.value()), List.of(), connectOpts, "--entity-type", "ips");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.GROUP.value()), List.of(), connectOpts, "--entity-type", "groups");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.CLIENT_METRICS.value()), List.of(), connectOpts, "--entity-type", "client-metrics");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.BROKER.value()), List.of("0"), connectOpts, "--entity-name", "0", "--entity-type", "brokers");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.BROKER.value()), List.of("0"), connectOpts, "--broker", "0");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.USER.value()), List.of(), connectOpts, "--entity-type", "users");
|
||||
testExpectedEntityTypeNames(List.of(ConfigType.BROKER.value()), List.of(), connectOpts, "--entity-type", "brokers");
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -501,31 +500,23 @@ public class ConfigCommandTest {
|
|||
}
|
||||
|
||||
private Entry<List<String>, Map<String, String>> argsAndExpectedEntity(Optional<String> entityName, String entityType) {
|
||||
String command;
|
||||
switch (entityType) {
|
||||
case ClientQuotaEntity.USER:
|
||||
command = "users";
|
||||
break;
|
||||
case ClientQuotaEntity.CLIENT_ID:
|
||||
command = "clients";
|
||||
break;
|
||||
case ClientQuotaEntity.IP:
|
||||
command = "ips";
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown command: " + entityType);
|
||||
}
|
||||
String command = switch (entityType) {
|
||||
case ClientQuotaEntity.USER -> ConfigType.USER.value();
|
||||
case ClientQuotaEntity.CLIENT_ID -> ConfigType.CLIENT.value();
|
||||
case ClientQuotaEntity.IP -> ConfigType.IP.value();
|
||||
default -> throw new IllegalArgumentException("Unknown command: " + entityType);
|
||||
};
|
||||
|
||||
return entityName.map(name -> {
|
||||
if (name.isEmpty())
|
||||
return new SimpleImmutableEntry<>(Arrays.asList("--entity-type", command, "--entity-default"), Collections.singletonMap(entityType, (String) null));
|
||||
return new SimpleImmutableEntry<>(Arrays.asList("--entity-type", command, "--entity-name", name), Collections.singletonMap(entityType, name));
|
||||
}).orElse(new SimpleImmutableEntry<>(Collections.emptyList(), Collections.emptyMap()));
|
||||
return new SimpleImmutableEntry<>(List.of("--entity-type", command, "--entity-default"), Collections.singletonMap(entityType, (String) null));
|
||||
return new SimpleImmutableEntry<>(List.of("--entity-type", command, "--entity-name", name), Map.of(entityType, name));
|
||||
}).orElse(new SimpleImmutableEntry<>(List.of(), Map.of()));
|
||||
}
|
||||
|
||||
private void verifyAlterCommandFails(String expectedErrorMessage, List<String> alterOpts) {
|
||||
Admin mockAdminClient = mock(Admin.class);
|
||||
ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--alter"), alterOpts));
|
||||
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(mockAdminClient, opts));
|
||||
assertTrue(e.getMessage().contains(expectedErrorMessage), "Unexpected exception: " + e);
|
||||
|
@ -535,25 +526,25 @@ public class ConfigCommandTest {
|
|||
public void shouldNotAlterNonQuotaIpConfigsUsingBootstrapServer() {
|
||||
// when using --bootstrap-server, it should be illegal to alter anything that is not a connection quota
|
||||
// for ip entities
|
||||
List<String> ipEntityOpts = Arrays.asList("--entity-type", "ips", "--entity-name", "127.0.0.1");
|
||||
List<String> ipEntityOpts = List.of("--entity-type", "ips", "--entity-name", "127.0.0.1");
|
||||
String invalidProp = "some_config";
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--add-config", "connection_creation_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--add-config", "some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--delete-config", "connection_creation_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--delete-config", "some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, List.of("--add-config", "connection_creation_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, List.of("--add-config", "some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, List.of("--delete-config", "connection_creation_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, List.of("--delete-config", "some_config=10")));
|
||||
}
|
||||
|
||||
private void verifyDescribeQuotas(List<String> describeArgs, ClientQuotaFilter expectedFilter) {
|
||||
ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--describe"), describeArgs));
|
||||
KafkaFutureImpl<Map<ClientQuotaEntity, Map<String, Double>>> describeFuture = new KafkaFutureImpl<>();
|
||||
describeFuture.complete(Collections.emptyMap());
|
||||
describeFuture.complete(Map.of());
|
||||
DescribeClientQuotasResult describeResult = mock(DescribeClientQuotasResult.class);
|
||||
when(describeResult.entities()).thenReturn(describeFuture);
|
||||
|
||||
AtomicBoolean describedConfigs = new AtomicBoolean();
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) {
|
||||
assertTrue(filter.strict());
|
||||
|
@ -570,24 +561,24 @@ public class ConfigCommandTest {
|
|||
public void testDescribeIpConfigs() {
|
||||
String entityType = ClientQuotaEntity.IP;
|
||||
String knownHost = "1.2.3.4";
|
||||
ClientQuotaFilter defaultIpFilter = ClientQuotaFilter.containsOnly(Collections.singletonList(ClientQuotaFilterComponent.ofDefaultEntity(entityType)));
|
||||
ClientQuotaFilter singleIpFilter = ClientQuotaFilter.containsOnly(Collections.singletonList(ClientQuotaFilterComponent.ofEntity(entityType, knownHost)));
|
||||
ClientQuotaFilter allIpsFilter = ClientQuotaFilter.containsOnly(Collections.singletonList(ClientQuotaFilterComponent.ofEntityType(entityType)));
|
||||
verifyDescribeQuotas(Arrays.asList("--entity-default", "--entity-type", "ips"), defaultIpFilter);
|
||||
verifyDescribeQuotas(Collections.singletonList("--ip-defaults"), defaultIpFilter);
|
||||
verifyDescribeQuotas(Arrays.asList("--entity-type", "ips", "--entity-name", knownHost), singleIpFilter);
|
||||
verifyDescribeQuotas(Arrays.asList("--ip", knownHost), singleIpFilter);
|
||||
verifyDescribeQuotas(Arrays.asList("--entity-type", "ips"), allIpsFilter);
|
||||
ClientQuotaFilter defaultIpFilter = ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofDefaultEntity(entityType)));
|
||||
ClientQuotaFilter singleIpFilter = ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntity(entityType, knownHost)));
|
||||
ClientQuotaFilter allIpsFilter = ClientQuotaFilter.containsOnly(List.of(ClientQuotaFilterComponent.ofEntityType(entityType)));
|
||||
verifyDescribeQuotas(List.of("--entity-default", "--entity-type", "ips"), defaultIpFilter);
|
||||
verifyDescribeQuotas(List.of("--ip-defaults"), defaultIpFilter);
|
||||
verifyDescribeQuotas(List.of("--entity-type", "ips", "--entity-name", knownHost), singleIpFilter);
|
||||
verifyDescribeQuotas(List.of("--ip", knownHost), singleIpFilter);
|
||||
verifyDescribeQuotas(List.of("--entity-type", "ips"), allIpsFilter);
|
||||
}
|
||||
|
||||
public void verifyAlterQuotas(List<String> alterOpts, ClientQuotaEntity expectedAlterEntity,
|
||||
Map<String, Double> expectedProps, Set<ClientQuotaAlteration.Op> expectedAlterOps) {
|
||||
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--alter"), alterOpts));
|
||||
|
||||
AtomicBoolean describedConfigs = new AtomicBoolean();
|
||||
KafkaFutureImpl<Map<ClientQuotaEntity, Map<String, Double>>> describeFuture = new KafkaFutureImpl<>();
|
||||
describeFuture.complete(Collections.singletonMap(expectedAlterEntity, expectedProps));
|
||||
describeFuture.complete(Map.of(expectedAlterEntity, expectedProps));
|
||||
DescribeClientQuotasResult describeResult = mock(DescribeClientQuotasResult.class);
|
||||
when(describeResult.entities()).thenReturn(describeFuture);
|
||||
|
||||
|
@ -606,7 +597,7 @@ public class ConfigCommandTest {
|
|||
when(alterResult.all()).thenReturn(alterFuture);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) {
|
||||
assertTrue(filter.strict());
|
||||
|
@ -638,11 +629,11 @@ public class ConfigCommandTest {
|
|||
Entry<List<String>, Map<String, String>> defaultIpArgsAndEntity = argsAndExpectedEntity(Optional.of(""), ClientQuotaEntity.IP);
|
||||
|
||||
|
||||
List<String> deleteArgs = Arrays.asList("--delete-config", "connection_creation_rate");
|
||||
List<String> deleteArgs = List.of("--delete-config", "connection_creation_rate");
|
||||
Set<ClientQuotaAlteration.Op> deleteAlterationOps = Set.of(new ClientQuotaAlteration.Op("connection_creation_rate", null));
|
||||
Map<String, Double> propsToDelete = Collections.singletonMap("connection_creation_rate", 50.0);
|
||||
Map<String, Double> propsToDelete = Map.of("connection_creation_rate", 50.0);
|
||||
|
||||
List<String> addArgs = Arrays.asList("--add-config", "connection_creation_rate=100");
|
||||
List<String> addArgs = List.of("--add-config", "connection_creation_rate=100");
|
||||
Set<ClientQuotaAlteration.Op> addAlterationOps = Set.of(new ClientQuotaAlteration.Op("connection_creation_rate", 100.0));
|
||||
|
||||
verifyAlterQuotas(
|
||||
|
@ -653,7 +644,7 @@ public class ConfigCommandTest {
|
|||
verifyAlterQuotas(
|
||||
concat(singleIpArgsAndEntity.getKey(), addArgs),
|
||||
new ClientQuotaEntity(singleIpArgsAndEntity.getValue()),
|
||||
Collections.emptyMap(),
|
||||
Map.of(),
|
||||
addAlterationOps);
|
||||
verifyAlterQuotas(
|
||||
concat(defaultIpArgsAndEntity.getKey(), deleteArgs),
|
||||
|
@ -663,14 +654,14 @@ public class ConfigCommandTest {
|
|||
verifyAlterQuotas(
|
||||
concat(defaultIpArgsAndEntity.getKey(), addArgs),
|
||||
new ClientQuotaEntity(defaultIpArgsAndEntity.getValue()),
|
||||
Collections.emptyMap(),
|
||||
Map.of(),
|
||||
addAlterationOps);
|
||||
}
|
||||
|
||||
private void verifyAlterUserClientQuotas(String user, String client) {
|
||||
List<String> alterArgs = Arrays.asList("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000",
|
||||
List<String> alterArgs = List.of("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000",
|
||||
"--delete-config", "request_percentage");
|
||||
Map<String, Double> propsToDelete = Collections.singletonMap("request_percentage", 50.0);
|
||||
Map<String, Double> propsToDelete = Map.of("request_percentage", 50.0);
|
||||
|
||||
Set<ClientQuotaAlteration.Op> alterationOps = Set.of(
|
||||
new ClientQuotaAlteration.Op("consumer_byte_rate", 20000d),
|
||||
|
@ -700,10 +691,10 @@ public class ConfigCommandTest {
|
|||
verifyAlterUserClientQuotas(null, "");
|
||||
}
|
||||
|
||||
private final List<String> userEntityOpts = Arrays.asList("--entity-type", "users", "--entity-name", "admin");
|
||||
private final List<String> clientEntityOpts = Arrays.asList("--entity-type", "clients", "--entity-name", "admin");
|
||||
private final List<String> addScramOpts = Arrays.asList("--add-config", "SCRAM-SHA-256=[iterations=8192,password=foo-secret]");
|
||||
private final List<String> deleteScramOpts = Arrays.asList("--delete-config", "SCRAM-SHA-256");
|
||||
private final List<String> userEntityOpts = List.of("--entity-type", "users", "--entity-name", "admin");
|
||||
private final List<String> clientEntityOpts = List.of("--entity-type", "clients", "--entity-name", "admin");
|
||||
private final List<String> addScramOpts = List.of("--add-config", "SCRAM-SHA-256=[iterations=8192,password=foo-secret]");
|
||||
private final List<String> deleteScramOpts = List.of("--delete-config", "SCRAM-SHA-256");
|
||||
|
||||
@Test
|
||||
public void shouldNotAlterNonQuotaNonScramUserOrClientConfigUsingBootstrapServer() {
|
||||
|
@ -711,13 +702,13 @@ public class ConfigCommandTest {
|
|||
// for both user and client entities
|
||||
String invalidProp = "some_config";
|
||||
verifyAlterCommandFails(invalidProp, concat(userEntityOpts,
|
||||
Arrays.asList("-add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10")));
|
||||
List.of("-add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(userEntityOpts,
|
||||
Arrays.asList("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(clientEntityOpts, Arrays.asList("--add-config", "some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(userEntityOpts, Arrays.asList("--delete-config", "consumer_byte_rate,some_config")));
|
||||
verifyAlterCommandFails(invalidProp, concat(userEntityOpts, Arrays.asList("--delete-config", "SCRAM-SHA-256,some_config")));
|
||||
verifyAlterCommandFails(invalidProp, concat(clientEntityOpts, Arrays.asList("--delete-config", "some_config")));
|
||||
List.of("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(clientEntityOpts, List.of("--add-config", "some_config=10")));
|
||||
verifyAlterCommandFails(invalidProp, concat(userEntityOpts, List.of("--delete-config", "consumer_byte_rate,some_config")));
|
||||
verifyAlterCommandFails(invalidProp, concat(userEntityOpts, List.of("--delete-config", "SCRAM-SHA-256,some_config")));
|
||||
verifyAlterCommandFails(invalidProp, concat(clientEntityOpts, List.of("--delete-config", "some_config")));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -731,16 +722,16 @@ public class ConfigCommandTest {
|
|||
public void shouldNotCreateUserScramCredentialConfigWithUnderMinimumIterationsUsingBootstrapServer() {
|
||||
// when using --bootstrap-server, it should be illegal to create a SCRAM credential for a user
|
||||
// with an iterations value less than the minimum
|
||||
verifyAlterCommandFails("SCRAM-SHA-256", concat(userEntityOpts, Arrays.asList("--add-config", "SCRAM-SHA-256=[iterations=100,password=foo-secret]")));
|
||||
verifyAlterCommandFails("SCRAM-SHA-256", concat(userEntityOpts, List.of("--add-config", "SCRAM-SHA-256=[iterations=100,password=foo-secret]")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotAlterUserScramCredentialAndClientQuotaConfigsSimultaneouslyUsingBootstrapServer() {
|
||||
// when using --bootstrap-server, it should be illegal to alter both SCRAM credentials and quotas for user entities
|
||||
String expectedErrorMessage = "SCRAM-SHA-256";
|
||||
List<String> secondUserEntityOpts = Arrays.asList("--entity-type", "users", "--entity-name", "admin1");
|
||||
List<String> addQuotaOpts = Arrays.asList("--add-config", "consumer_byte_rate=20000");
|
||||
List<String> deleteQuotaOpts = Arrays.asList("--delete-config", "consumer_byte_rate");
|
||||
List<String> secondUserEntityOpts = List.of("--entity-type", "users", "--entity-name", "admin1");
|
||||
List<String> addQuotaOpts = List.of("--add-config", "consumer_byte_rate=20000");
|
||||
List<String> deleteQuotaOpts = List.of("--delete-config", "consumer_byte_rate");
|
||||
|
||||
verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, addScramOpts, userEntityOpts, deleteQuotaOpts));
|
||||
verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, addScramOpts, secondUserEntityOpts, deleteQuotaOpts));
|
||||
|
@ -758,11 +749,11 @@ public class ConfigCommandTest {
|
|||
// User SCRAM credentials should not be described when specifying
|
||||
// --describe --entity-type users --entity-default (or --user-defaults) with --bootstrap-server
|
||||
KafkaFutureImpl<Map<ClientQuotaEntity, Map<String, Double>>> describeFuture = new KafkaFutureImpl<>();
|
||||
describeFuture.complete(Collections.singletonMap(new ClientQuotaEntity(Collections.singletonMap("", "")), Collections.singletonMap("request_percentage", 50.0)));
|
||||
describeFuture.complete(Map.of(new ClientQuotaEntity(Map.of("", "")), Map.of("request_percentage", 50.0)));
|
||||
DescribeClientQuotasResult describeClientQuotasResult = mock(DescribeClientQuotasResult.class);
|
||||
when(describeClientQuotasResult.entities()).thenReturn(describeFuture);
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) {
|
||||
return describeClientQuotasResult;
|
||||
|
@ -773,15 +764,15 @@ public class ConfigCommandTest {
|
|||
throw new IllegalStateException("Incorrectly described SCRAM credentials when specifying --entity-default with --bootstrap-server");
|
||||
}
|
||||
};
|
||||
ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092", "--describe"), requestOpts));
|
||||
ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(List.of("--bootstrap-server", "localhost:9092", "--describe"), requestOpts));
|
||||
ConfigCommand.describeConfig(mockAdminClient, opts); // fails if describeUserScramCredentials() is invoked
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotDescribeUserScramCredentialsWithEntityDefaultUsingBootstrapServer() {
|
||||
String expectedMsg = "The use of --entity-default or --user-defaults is not allowed with User SCRAM Credentials using --bootstrap-server.";
|
||||
List<String> defaultUserOpt = Collections.singletonList("--user-defaults");
|
||||
List<String> verboseDefaultUserOpts = Arrays.asList("--entity-type", "users", "--entity-default");
|
||||
List<String> defaultUserOpt = List.of("--user-defaults");
|
||||
List<String> verboseDefaultUserOpts = List.of("--entity-type", "users", "--entity-default");
|
||||
verifyAlterCommandFails(expectedMsg, concat(verboseDefaultUserOpts, addScramOpts));
|
||||
verifyAlterCommandFails(expectedMsg, concat(verboseDefaultUserOpts, deleteScramOpts));
|
||||
verifyUserScramCredentialsNotDescribed(verboseDefaultUserOpts);
|
||||
|
@ -813,9 +804,9 @@ public class ConfigCommandTest {
|
|||
AtomicBoolean alteredConfigs = new AtomicBoolean();
|
||||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName);
|
||||
List<ConfigEntry> configEntries = Arrays.asList(newConfigEntry("min.insync.replicas", "1"), newConfigEntry("unclean.leader.election.enable", "1"));
|
||||
List<ConfigEntry> configEntries = List.of(newConfigEntry("min.insync.replicas", "1"), newConfigEntry("unclean.leader.election.enable", "1"));
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(configEntries)));
|
||||
future.complete(Map.of(resource, new Config(configEntries)));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
|
@ -825,14 +816,14 @@ public class ConfigCommandTest {
|
|||
when(alterResult.all()).thenReturn(alterFuture);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily");
|
||||
assertEquals(1, resources.size());
|
||||
ConfigResource res = resources.iterator().next();
|
||||
assertEquals(res.type(), ConfigResource.Type.TOPIC);
|
||||
assertEquals(res.name(), resourceName);
|
||||
assertEquals(ConfigResource.Type.TOPIC, res.type());
|
||||
assertEquals(resourceName, res.name());
|
||||
return describeResult;
|
||||
}
|
||||
|
||||
|
@ -869,7 +860,7 @@ public class ConfigCommandTest {
|
|||
}
|
||||
|
||||
public ConfigEntry newConfigEntry(String name, String value) {
|
||||
return ConfigTest.newConfigEntry(name, value, ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, Collections.emptyList());
|
||||
return ConfigTest.newConfigEntry(name, value, ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, List.of());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -883,16 +874,16 @@ public class ConfigCommandTest {
|
|||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName);
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(Collections.emptyList())));
|
||||
future.complete(Map.of(resource, new Config(List.of())));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertTrue(options.includeSynonyms(), "Synonyms not requested");
|
||||
assertEquals(Collections.singleton(resource), new HashSet<>(resources));
|
||||
assertEquals(Set.of(resource), new HashSet<>(resources));
|
||||
return describeResult;
|
||||
}
|
||||
};
|
||||
|
@ -903,7 +894,7 @@ public class ConfigCommandTest {
|
|||
@Test
|
||||
public void shouldAddBrokerLoggerConfig() {
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
verifyAlterBrokerLoggerConfig(node, "1", "1", Arrays.asList(
|
||||
verifyAlterBrokerLoggerConfig(node, "1", "1", List.of(
|
||||
new ConfigEntry("kafka.log.LogCleaner", "INFO"),
|
||||
new ConfigEntry("kafka.server.ReplicaManager", "INFO"),
|
||||
new ConfigEntry("kafka.server.KafkaApi", "INFO")
|
||||
|
@ -970,7 +961,7 @@ public class ConfigCommandTest {
|
|||
Node node = new Node(1, "localhost", 9092);
|
||||
// verifyAlterBrokerLoggerConfig tries to alter kafka.log.LogCleaner, kafka.server.ReplicaManager and kafka.server.KafkaApi
|
||||
// yet, we make it so DescribeConfigs returns only one logger, implying that kafka.server.ReplicaManager and kafka.log.LogCleaner are invalid
|
||||
assertThrows(InvalidConfigurationException.class, () -> verifyAlterBrokerLoggerConfig(node, "1", "1", Collections.singletonList(
|
||||
assertThrows(InvalidConfigurationException.class, () -> verifyAlterBrokerLoggerConfig(node, "1", "1", List.of(
|
||||
new ConfigEntry("kafka.server.KafkaApi", "INFO")
|
||||
)));
|
||||
}
|
||||
|
@ -978,17 +969,17 @@ public class ConfigCommandTest {
|
|||
@Test
|
||||
public void shouldAddDefaultBrokerDynamicConfig() {
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
verifyAlterBrokerConfig(node, "", Collections.singletonList("--entity-default"));
|
||||
verifyAlterBrokerConfig(node, "", List.of("--entity-default"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldAddBrokerDynamicConfig() {
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
verifyAlterBrokerConfig(node, "1", Arrays.asList("--entity-name", "1"));
|
||||
verifyAlterBrokerConfig(node, "1", List.of("--entity-name", "1"));
|
||||
}
|
||||
|
||||
public void verifyAlterBrokerConfig(Node node, String resourceName, List<String> resourceOpts) {
|
||||
String[] optsList = toArray(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
String[] optsList = toArray(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--entity-type", "brokers",
|
||||
"--alter",
|
||||
"--add-config", "message.max.bytes=10,leader.replication.throttled.rate=10"), resourceOpts);
|
||||
|
@ -997,9 +988,9 @@ public class ConfigCommandTest {
|
|||
brokerConfigs.put("num.io.threads", "5");
|
||||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, resourceName);
|
||||
List<ConfigEntry> configEntries = Collections.singletonList(new ConfigEntry("num.io.threads", "5"));
|
||||
List<ConfigEntry> configEntries = List.of(new ConfigEntry("num.io.threads", "5"));
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(configEntries)));
|
||||
future.complete(Map.of(resource, new Config(configEntries)));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
|
@ -1008,7 +999,7 @@ public class ConfigCommandTest {
|
|||
AlterConfigsResult alterResult = mock(AlterConfigsResult.class);
|
||||
when(alterResult.all()).thenReturn(alterFuture);
|
||||
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily");
|
||||
|
@ -1049,7 +1040,7 @@ public class ConfigCommandTest {
|
|||
ConfigResource resourceCustom = new ConfigResource(ConfigResource.Type.BROKER, "1");
|
||||
ConfigResource resourceDefault = new ConfigResource(ConfigResource.Type.BROKER, brokerDefaultEntityName);
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
Config emptyConfig = new Config(Collections.emptyList());
|
||||
Config emptyConfig = new Config(List.of());
|
||||
Map<ConfigResource, Config> resultMap = new HashMap<>();
|
||||
resultMap.put(resourceCustom, emptyConfig);
|
||||
resultMap.put(resourceDefault, emptyConfig);
|
||||
|
@ -1059,7 +1050,7 @@ public class ConfigCommandTest {
|
|||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertTrue(options.includeSynonyms(), "Synonyms not requested");
|
||||
|
@ -1087,7 +1078,7 @@ public class ConfigCommandTest {
|
|||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, resourceName);
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(describeConfigEntries)));
|
||||
future.complete(Map.of(resource, new Config(describeConfigEntries)));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
|
@ -1096,7 +1087,7 @@ public class ConfigCommandTest {
|
|||
AlterConfigsResult alterResult = mock(AlterConfigsResult.class);
|
||||
when(alterResult.all()).thenReturn(alterFuture);
|
||||
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertEquals(1, resources.size());
|
||||
|
@ -1115,7 +1106,7 @@ public class ConfigCommandTest {
|
|||
assertEquals(ConfigResource.Type.BROKER_LOGGER, res.type());
|
||||
assertEquals(3, alterConfigOps.size());
|
||||
|
||||
List<AlterConfigOp> expectedConfigOps = Arrays.asList(
|
||||
List<AlterConfigOp> expectedConfigOps = List.of(
|
||||
new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", ""), AlterConfigOp.OpType.DELETE),
|
||||
new AlterConfigOp(new ConfigEntry("kafka.server.KafkaApi", ""), AlterConfigOp.OpType.DELETE),
|
||||
new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", "DEBUG"), AlterConfigOp.OpType.SET)
|
||||
|
@ -1174,14 +1165,14 @@ public class ConfigCommandTest {
|
|||
"--delete-config", "missing_config1, missing_config2"));
|
||||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName);
|
||||
List<ConfigEntry> configEntries = Collections.emptyList();
|
||||
List<ConfigEntry> configEntries = List.of();
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(configEntries)));
|
||||
future.complete(Map.of(resource, new Config(configEntries)));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertEquals(1, resources.size());
|
||||
|
@ -1199,15 +1190,15 @@ public class ConfigCommandTest {
|
|||
@Test
|
||||
public void shouldAlterClientMetricsConfig() {
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
verifyAlterClientMetricsConfig(node, "1", Arrays.asList("--entity-type", "client-metrics", "--entity-name", "1"));
|
||||
verifyAlterClientMetricsConfig(node, "1", List.of("--entity-type", "client-metrics", "--entity-name", "1"));
|
||||
|
||||
// Test for the --client-metrics alias
|
||||
node = new Node(1, "localhost", 9092);
|
||||
verifyAlterClientMetricsConfig(node, "1", Arrays.asList("--client-metrics", "1"));
|
||||
verifyAlterClientMetricsConfig(node, "1", List.of("--client-metrics", "1"));
|
||||
}
|
||||
|
||||
private void verifyAlterClientMetricsConfig(Node node, String resourceName, List<String> resourceOpts) {
|
||||
List<String> optsList = concat(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
List<String> optsList = concat(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--alter",
|
||||
"--delete-config", "interval.ms",
|
||||
"--add-config", "metrics=org.apache.kafka.consumer.," +
|
||||
|
@ -1215,11 +1206,11 @@ public class ConfigCommandTest {
|
|||
ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray(optsList));
|
||||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, resourceName);
|
||||
List<ConfigEntry> configEntries = Collections.singletonList(new ConfigEntry("interval.ms", "1000",
|
||||
ConfigEntry.ConfigSource.DYNAMIC_CLIENT_METRICS_CONFIG, false, false, Collections.emptyList(),
|
||||
List<ConfigEntry> configEntries = List.of(new ConfigEntry("interval.ms", "1000",
|
||||
ConfigEntry.ConfigSource.DYNAMIC_CLIENT_METRICS_CONFIG, false, false, List.of(),
|
||||
ConfigEntry.ConfigType.UNKNOWN, null));
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(configEntries)));
|
||||
future.complete(Map.of(resource, new Config(configEntries)));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
|
@ -1228,7 +1219,7 @@ public class ConfigCommandTest {
|
|||
AlterConfigsResult alterResult = mock(AlterConfigsResult.class);
|
||||
when(alterResult.all()).thenReturn(alterFuture);
|
||||
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily");
|
||||
|
@ -1248,7 +1239,7 @@ public class ConfigCommandTest {
|
|||
assertEquals(ConfigResource.Type.CLIENT_METRICS, res.type());
|
||||
assertEquals(3, alterConfigOps.size());
|
||||
|
||||
List<AlterConfigOp> expectedConfigOps = Arrays.asList(
|
||||
List<AlterConfigOp> expectedConfigOps = List.of(
|
||||
new AlterConfigOp(new ConfigEntry("interval.ms", ""), AlterConfigOp.OpType.DELETE),
|
||||
new AlterConfigOp(new ConfigEntry("match", "client_software_name=kafka.python,client_software_version=1\\.2\\..*"), AlterConfigOp.OpType.SET),
|
||||
new AlterConfigOp(new ConfigEntry("metrics", "org.apache.kafka.consumer."), AlterConfigOp.OpType.SET)
|
||||
|
@ -1279,7 +1270,7 @@ public class ConfigCommandTest {
|
|||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertTrue(options.includeSynonyms());
|
||||
|
@ -1287,12 +1278,12 @@ public class ConfigCommandTest {
|
|||
ConfigResource resource = resources.iterator().next();
|
||||
assertEquals(ConfigResource.Type.CLIENT_METRICS, resource.type());
|
||||
assertEquals(resourceCustom.name(), resource.name());
|
||||
future.complete(Collections.singletonMap(resourceCustom, new Config(Collections.singletonList(configEntry))));
|
||||
future.complete(Map.of(resourceCustom, new Config(List.of(configEntry))));
|
||||
return describeResult;
|
||||
}
|
||||
};
|
||||
mockAdminClient.incrementalAlterConfigs(Collections.singletonMap(resourceCustom,
|
||||
Collections.singletonList(new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))), new AlterConfigsOptions());
|
||||
mockAdminClient.incrementalAlterConfigs(Map.of(resourceCustom,
|
||||
List.of(new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))), new AlterConfigsOptions());
|
||||
ConfigCommand.describeConfig(mockAdminClient, describeOpts);
|
||||
verify(describeResult).all();
|
||||
}
|
||||
|
@ -1311,25 +1302,25 @@ public class ConfigCommandTest {
|
|||
@Test
|
||||
public void shouldAlterGroupConfig() {
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
verifyAlterGroupConfig(node, "group", Arrays.asList("--entity-type", "groups", "--entity-name", "group"));
|
||||
verifyAlterGroupConfig(node, "group", List.of("--entity-type", "groups", "--entity-name", "group"));
|
||||
|
||||
// Test for the --group alias
|
||||
verifyAlterGroupConfig(node, "groupUsingAlias", Arrays.asList("--group", "groupUsingAlias"));
|
||||
verifyAlterGroupConfig(node, "groupUsingAlias", List.of("--group", "groupUsingAlias"));
|
||||
}
|
||||
|
||||
private void verifyAlterGroupConfig(Node node, String resourceName, List<String> resourceOpts) {
|
||||
List<String> optsList = concat(Arrays.asList("--bootstrap-server", "localhost:9092",
|
||||
List<String> optsList = concat(List.of("--bootstrap-server", "localhost:9092",
|
||||
"--alter",
|
||||
"--delete-config", "consumer.session.timeout.ms",
|
||||
"--add-config", "consumer.heartbeat.interval.ms=6000"), resourceOpts);
|
||||
ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray(optsList));
|
||||
|
||||
ConfigResource resource = new ConfigResource(ConfigResource.Type.GROUP, resourceName);
|
||||
List<ConfigEntry> configEntries = Collections.singletonList(new ConfigEntry("consumer.session.timeout.ms", "45000",
|
||||
ConfigEntry.ConfigSource.DYNAMIC_GROUP_CONFIG, false, false, Collections.emptyList(),
|
||||
List<ConfigEntry> configEntries = List.of(new ConfigEntry("consumer.session.timeout.ms", "45000",
|
||||
ConfigEntry.ConfigSource.DYNAMIC_GROUP_CONFIG, false, false, List.of(),
|
||||
ConfigEntry.ConfigType.UNKNOWN, null));
|
||||
KafkaFutureImpl<Map<ConfigResource, Config>> future = new KafkaFutureImpl<>();
|
||||
future.complete(Collections.singletonMap(resource, new Config(configEntries)));
|
||||
future.complete(Map.of(resource, new Config(configEntries)));
|
||||
DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class);
|
||||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
|
@ -1338,7 +1329,7 @@ public class ConfigCommandTest {
|
|||
AlterConfigsResult alterResult = mock(AlterConfigsResult.class);
|
||||
when(alterResult.all()).thenReturn(alterFuture);
|
||||
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily");
|
||||
|
@ -1358,7 +1349,7 @@ public class ConfigCommandTest {
|
|||
assertEquals(ConfigResource.Type.GROUP, res.type());
|
||||
assertEquals(2, alterConfigOps.size());
|
||||
|
||||
List<AlterConfigOp> expectedConfigOps = Arrays.asList(
|
||||
List<AlterConfigOp> expectedConfigOps = List.of(
|
||||
new AlterConfigOp(new ConfigEntry("consumer.session.timeout.ms", ""), AlterConfigOp.OpType.DELETE),
|
||||
new AlterConfigOp(new ConfigEntry("consumer.heartbeat.interval.ms", "6000"), AlterConfigOp.OpType.SET)
|
||||
);
|
||||
|
@ -1392,7 +1383,7 @@ public class ConfigCommandTest {
|
|||
when(describeResult.all()).thenReturn(future);
|
||||
|
||||
Node node = new Node(1, "localhost", 9092);
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) {
|
||||
MockAdminClient mockAdminClient = new MockAdminClient(List.of(node), node) {
|
||||
@Override
|
||||
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
|
||||
assertTrue(options.includeSynonyms());
|
||||
|
@ -1400,12 +1391,12 @@ public class ConfigCommandTest {
|
|||
ConfigResource resource = resources.iterator().next();
|
||||
assertEquals(ConfigResource.Type.GROUP, resource.type());
|
||||
assertEquals(resourceCustom.name(), resource.name());
|
||||
future.complete(Collections.singletonMap(resourceCustom, new Config(Collections.singletonList(configEntry))));
|
||||
future.complete(Map.of(resourceCustom, new Config(List.of(configEntry))));
|
||||
return describeResult;
|
||||
}
|
||||
};
|
||||
mockAdminClient.incrementalAlterConfigs(Collections.singletonMap(resourceCustom,
|
||||
Collections.singletonList(new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))), new AlterConfigsOptions());
|
||||
mockAdminClient.incrementalAlterConfigs(Map.of(resourceCustom,
|
||||
List.of(new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))), new AlterConfigsOptions());
|
||||
ConfigCommand.describeConfig(mockAdminClient, describeOpts);
|
||||
verify(describeResult).all();
|
||||
}
|
||||
|
@ -1432,7 +1423,7 @@ public class ConfigCommandTest {
|
|||
|
||||
@SafeVarargs
|
||||
public static List<String> concat(List<String>... lists) {
|
||||
return Stream.of(lists).flatMap(List::stream).collect(Collectors.toList());
|
||||
return Stream.of(lists).flatMap(List::stream).toList();
|
||||
}
|
||||
|
||||
@SafeVarargs
|
||||
|
@ -1448,7 +1439,7 @@ public class ConfigCommandTest {
|
|||
|
||||
static class DummyAdminClient extends MockAdminClient {
|
||||
public DummyAdminClient(Node node) {
|
||||
super(Collections.singletonList(node), node);
|
||||
super(List.of(node), node);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -568,7 +568,7 @@ public class ConnectPluginPathTest {
|
|||
} catch (IOException e) {
|
||||
throw new UncheckedIOException(e);
|
||||
}
|
||||
return new WorkerConfig(path, Arrays.asList(pluginPathElements));
|
||||
return new WorkerConfig(path, List.of(pluginPathElements));
|
||||
}
|
||||
|
||||
private static class CommandResult {
|
||||
|
|
|
@ -35,9 +35,9 @@ import java.io.OutputStream;
|
|||
import java.nio.file.Files;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
|
@ -93,7 +93,7 @@ public class ConsoleProducerTest {
|
|||
ConsoleProducerOptions opts = new ConsoleProducerOptions(BOOTSTRAP_SERVER_VALID_ARGS);
|
||||
ProducerConfig producerConfig = new ProducerConfig(opts.producerProps());
|
||||
|
||||
assertEquals(asList("localhost:1003", "localhost:1004"), producerConfig.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
|
||||
assertEquals(List.of("localhost:1003", "localhost:1004"), producerConfig.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -147,7 +147,7 @@ public class ConsoleProducerTest {
|
|||
ConsoleProducerOptions opts = new ConsoleProducerOptions(BOOTSTRAP_SERVER_OVERRIDE);
|
||||
ProducerConfig producerConfig = new ProducerConfig(opts.producerProps());
|
||||
|
||||
assertEquals(Collections.singletonList("localhost:1002"), producerConfig.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
|
||||
assertEquals(List.of("localhost:1002"), producerConfig.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -24,8 +24,6 @@ import org.apache.kafka.common.utils.Exit;
|
|||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
|
@ -55,12 +53,12 @@ public class DelegationTokenCommandTest {
|
|||
|
||||
tokens = DelegationTokenCommand.describeToken(adminClient, getDescribeOpts(""));
|
||||
assertEquals(2, tokens.size());
|
||||
assertEquals(Arrays.asList(token1, token2), tokens);
|
||||
assertEquals(List.of(token1, token2), tokens);
|
||||
|
||||
//get tokens for renewer2
|
||||
tokens = DelegationTokenCommand.describeToken(adminClient, getDescribeOpts(renewer2));
|
||||
assertEquals(1, tokens.size());
|
||||
assertEquals(Collections.singletonList(token2), tokens);
|
||||
assertEquals(List.of(token2), tokens);
|
||||
|
||||
//test renewing tokens
|
||||
Long expiryTimestamp = DelegationTokenCommand.renewToken(adminClient, getRenewOpts(token1.hmacAsBase64String()));
|
||||
|
|
|
@ -33,12 +33,11 @@ import org.junit.jupiter.api.Test;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
|
@ -62,7 +61,7 @@ public class DeleteRecordsCommandTest {
|
|||
"Offset json file contains duplicate topic partitions: t-0"
|
||||
);
|
||||
|
||||
admin.createTopics(Collections.singleton(new NewTopic("t", 1, (short) 1))).all().get();
|
||||
admin.createTopics(Set.of(new NewTopic("t", 1, (short) 1))).all().get();
|
||||
|
||||
Properties props = new Properties();
|
||||
|
||||
|
@ -159,8 +158,8 @@ public class DeleteRecordsCommandTest {
|
|||
);
|
||||
|
||||
assertEquals(2, res.size());
|
||||
assertEquals(Arrays.asList(0L, 2L, 0L), res.get(new TopicPartition("t", 0)));
|
||||
assertEquals(Collections.singletonList(1L), res.get(new TopicPartition("t", 1)));
|
||||
assertEquals(List.of(0L, 2L, 0L), res.get(new TopicPartition("t", 0)));
|
||||
assertEquals(List.of(1L), res.get(new TopicPartition("t", 1)));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -28,15 +28,11 @@ import net.sourceforge.argparse4j.inf.Namespace;
|
|||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.lang.String.format;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType.SAFE_DOWNGRADE;
|
||||
import static org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE;
|
||||
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
|
||||
|
@ -46,7 +42,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
|
|||
|
||||
public class FeatureCommandTest {
|
||||
|
||||
private final List<Feature> testingFeatures = Arrays.stream(Feature.FEATURES).collect(Collectors.toList());
|
||||
private final List<Feature> testingFeatures = Arrays.stream(Feature.FEATURES).toList();
|
||||
|
||||
@ClusterTest(types = {Type.KRAFT}, metadataVersion = MetadataVersion.IBP_3_3_IV3)
|
||||
public void testDescribeWithKRaft(ClusterInstance cluster) {
|
||||
|
@ -54,7 +50,7 @@ public class FeatureCommandTest {
|
|||
assertEquals(0, FeatureCommand.mainNoExit("--bootstrap-server", cluster.bootstrapServers(), "describe"))
|
||||
);
|
||||
|
||||
List<String> features = Arrays.stream(commandOutput.split("\n")).sorted().collect(Collectors.toList());
|
||||
List<String> features = Arrays.stream(commandOutput.split("\n")).sorted().toList();
|
||||
|
||||
// Change expected message to reflect latest MetadataVersion (SupportedMaxVersion increases when adding a new version)
|
||||
assertEquals("Feature: eligible.leader.replicas.version\tSupportedMinVersion: 0\t" +
|
||||
|
@ -80,7 +76,7 @@ public class FeatureCommandTest {
|
|||
assertEquals(0, FeatureCommand.mainNoExit("--bootstrap-controller", cluster.bootstrapControllers(), "describe"))
|
||||
);
|
||||
|
||||
List<String> features = Arrays.stream(commandOutput.split("\n")).sorted().collect(Collectors.toList());
|
||||
List<String> features = Arrays.stream(commandOutput.split("\n")).sorted().toList();
|
||||
|
||||
// Change expected message to reflect latest MetadataVersion (SupportedMaxVersion increases when adding a new version)
|
||||
assertEquals("Feature: eligible.leader.replicas.version\tSupportedMinVersion: 0\t" +
|
||||
|
@ -223,10 +219,10 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testDowngradeType() {
|
||||
assertEquals(SAFE_DOWNGRADE, FeatureCommand.downgradeType(
|
||||
new Namespace(singletonMap("unsafe", Boolean.FALSE))));
|
||||
new Namespace(Map.of("unsafe", Boolean.FALSE))));
|
||||
assertEquals(UNSAFE_DOWNGRADE, FeatureCommand.downgradeType(
|
||||
new Namespace(singletonMap("unsafe", Boolean.TRUE))));
|
||||
assertEquals(SAFE_DOWNGRADE, FeatureCommand.downgradeType(new Namespace(emptyMap())));
|
||||
new Namespace(Map.of("unsafe", Boolean.TRUE))));
|
||||
assertEquals(SAFE_DOWNGRADE, FeatureCommand.downgradeType(new Namespace(Map.of())));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -275,7 +271,7 @@ public class FeatureCommandTest {
|
|||
public void testHandleUpgradeToUnsupportedMetadataVersion() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("metadata", "3.3-IV1");
|
||||
namespace.put("feature", Collections.singletonList("foo.bar=6"));
|
||||
namespace.put("feature", List.of("foo.bar=6"));
|
||||
namespace.put("dry_run", false);
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleUpgrade(new Namespace(namespace), buildAdminClient()));
|
||||
assertTrue(t.getMessage().contains("Unknown metadata.version 3.3-IV1"));
|
||||
|
@ -285,7 +281,7 @@ public class FeatureCommandTest {
|
|||
public void testHandleUpgradeToLowerVersion() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("metadata", "3.3-IV3");
|
||||
namespace.put("feature", Collections.singletonList("foo.bar=6"));
|
||||
namespace.put("feature", List.of("foo.bar=6"));
|
||||
namespace.put("dry_run", false);
|
||||
String upgradeOutput = ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleUpgrade(new Namespace(namespace), buildAdminClient()));
|
||||
|
@ -300,7 +296,7 @@ public class FeatureCommandTest {
|
|||
public void testHandleUpgradeToLowerVersionDryRun() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("metadata", "3.3-IV3");
|
||||
namespace.put("feature", Collections.singletonList("foo.bar=6"));
|
||||
namespace.put("feature", List.of("foo.bar=6"));
|
||||
namespace.put("dry_run", true);
|
||||
String upgradeOutput = ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleUpgrade(new Namespace(namespace), buildAdminClient()));
|
||||
|
@ -315,7 +311,7 @@ public class FeatureCommandTest {
|
|||
public void testHandleDowngrade() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("metadata", "3.7-IV0");
|
||||
namespace.put("feature", Collections.singletonList("foo.bar=1"));
|
||||
namespace.put("feature", List.of("foo.bar=1"));
|
||||
namespace.put("dry_run", false);
|
||||
String downgradeOutput = ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleDowngrade(new Namespace(namespace), buildAdminClient()));
|
||||
|
@ -330,7 +326,7 @@ public class FeatureCommandTest {
|
|||
public void testHandleDowngradeDryRun() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("metadata", "3.7-IV0");
|
||||
namespace.put("feature", Collections.singletonList("foo.bar=1"));
|
||||
namespace.put("feature", List.of("foo.bar=1"));
|
||||
namespace.put("dry_run", true);
|
||||
String downgradeOutput = ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleDowngrade(new Namespace(namespace), buildAdminClient()));
|
||||
|
@ -344,7 +340,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleDisable() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Arrays.asList("foo.bar", "metadata.version", "quux"));
|
||||
namespace.put("feature", List.of("foo.bar", "metadata.version", "quux"));
|
||||
namespace.put("dry_run", false);
|
||||
String disableOutput = ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleDisable(new Namespace(namespace), buildAdminClient()));
|
||||
|
@ -358,7 +354,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleDisableDryRun() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Arrays.asList("foo.bar", "metadata.version", "quux"));
|
||||
namespace.put("feature", List.of("foo.bar", "metadata.version", "quux"));
|
||||
namespace.put("dry_run", true);
|
||||
String disableOutput = ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleDisable(new Namespace(namespace), buildAdminClient()));
|
||||
|
@ -383,7 +379,7 @@ public class FeatureCommandTest {
|
|||
public void testIncompatibleUpgradeFlags() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("release_version", "3.3-IV3");
|
||||
namespace.put("feature", Arrays.asList("foo.bar", "metadata.version", "quux"));
|
||||
namespace.put("feature", List.of("foo.bar", "metadata.version", "quux"));
|
||||
ToolsTestUtils.captureStandardOut(() -> {
|
||||
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleUpgrade(new Namespace(namespace), buildAdminClient()));
|
||||
assertTrue(t.getMessage().contains("Can not specify `release-version` with other feature flags."));
|
||||
|
@ -474,7 +470,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleFeatureDependenciesForFeatureWithDependencies() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Collections.singletonList("test.feature.version=2"));
|
||||
namespace.put("feature", List.of("test.feature.version=2"));
|
||||
|
||||
String output = ToolsTestUtils.captureStandardOut(() -> {
|
||||
try {
|
||||
|
@ -496,7 +492,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleFeatureDependenciesForFeatureWithNoDependencies() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Collections.singletonList("metadata.version=17"));
|
||||
namespace.put("feature", List.of("metadata.version=17"));
|
||||
|
||||
String output = ToolsTestUtils.captureStandardOut(() -> {
|
||||
try {
|
||||
|
@ -512,7 +508,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleFeatureDependenciesForUnknownFeature() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Collections.singletonList("unknown.feature=1"));
|
||||
namespace.put("feature", List.of("unknown.feature=1"));
|
||||
|
||||
Exception exception = assertThrows(
|
||||
TerseException.class,
|
||||
|
@ -525,7 +521,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleFeatureDependenciesForFeatureWithUnknownFeatureVersion() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Collections.singletonList("transaction.version=1000"));
|
||||
namespace.put("feature", List.of("transaction.version=1000"));
|
||||
|
||||
Exception exception = assertThrows(
|
||||
IllegalArgumentException.class,
|
||||
|
@ -538,7 +534,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleFeatureDependenciesForInvalidVersionFormat() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Collections.singletonList("metadata.version=invalid"));
|
||||
namespace.put("feature", List.of("metadata.version=invalid"));
|
||||
|
||||
RuntimeException exception = assertThrows(
|
||||
RuntimeException.class,
|
||||
|
@ -554,7 +550,7 @@ public class FeatureCommandTest {
|
|||
@Test
|
||||
public void testHandleFeatureDependenciesForMultipleFeatures() {
|
||||
Map<String, Object> namespace = new HashMap<>();
|
||||
namespace.put("feature", Arrays.asList(
|
||||
namespace.put("feature", List.of(
|
||||
"transaction.version=2",
|
||||
"group.version=1",
|
||||
"test.feature.version=2"
|
||||
|
|
|
@ -46,7 +46,6 @@ import org.apache.kafka.test.TestUtils;
|
|||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -85,7 +84,7 @@ public class GetOffsetShellTest {
|
|||
}
|
||||
|
||||
private void setUp() {
|
||||
setupTopics(this::getTopicName, Collections.emptyMap());
|
||||
setupTopics(this::getTopicName, Map.of());
|
||||
sendProducerRecords(this::getTopicName);
|
||||
}
|
||||
|
||||
|
@ -139,7 +138,7 @@ public class GetOffsetShellTest {
|
|||
serverProperties.put(ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_CONFIG, "100");
|
||||
serverProperties.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, "EXTERNAL");
|
||||
|
||||
return Collections.singletonList(
|
||||
return List.of(
|
||||
ClusterConfig.defaultBuilder()
|
||||
.setTypes(Stream.of(KRAFT, CO_KRAFT).collect(Collectors.toSet()))
|
||||
.setServerProperties(serverProperties)
|
||||
|
@ -238,7 +237,7 @@ public class GetOffsetShellTest {
|
|||
setUp();
|
||||
|
||||
List<Row> offsets = executeAndParse("--partitions", "0,1");
|
||||
assertEquals(expectedTestTopicOffsets().stream().filter(r -> r.partition <= 1).collect(Collectors.toList()), offsets);
|
||||
assertEquals(expectedTestTopicOffsets().stream().filter(r -> r.partition <= 1).toList(), offsets);
|
||||
}
|
||||
|
||||
@ClusterTest
|
||||
|
@ -247,7 +246,7 @@ public class GetOffsetShellTest {
|
|||
|
||||
List<Row> offsets = executeAndParse("--topic", "topic.*", "--partitions", "0,1");
|
||||
|
||||
assertEquals(expectedTestTopicOffsets().stream().filter(r -> r.partition <= 1).collect(Collectors.toList()), offsets);
|
||||
assertEquals(expectedTestTopicOffsets().stream().filter(r -> r.partition <= 1).toList(), offsets);
|
||||
}
|
||||
|
||||
@ClusterTest
|
||||
|
@ -257,7 +256,7 @@ public class GetOffsetShellTest {
|
|||
createConsumerAndPoll();
|
||||
|
||||
List<Row> offsets = executeAndParse("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3");
|
||||
List<Row> expected = Arrays.asList(
|
||||
List<Row> expected = List.of(
|
||||
new Row("__consumer_offsets", 3, 0L),
|
||||
new Row("topic1", 0, 1L),
|
||||
new Row("topic2", 1, 2L),
|
||||
|
@ -274,7 +273,7 @@ public class GetOffsetShellTest {
|
|||
|
||||
for (String time : new String[] {"-1", "latest"}) {
|
||||
List<Row> offsets = executeAndParse("--topic-partitions", "topic.*:0", "--time", time);
|
||||
List<Row> expected = Arrays.asList(
|
||||
List<Row> expected = List.of(
|
||||
new Row("topic1", 0, 1L),
|
||||
new Row("topic2", 0, 2L),
|
||||
new Row("topic3", 0, 3L),
|
||||
|
@ -291,7 +290,7 @@ public class GetOffsetShellTest {
|
|||
|
||||
for (String time : new String[] {"-2", "earliest"}) {
|
||||
List<Row> offsets = executeAndParse("--topic-partitions", "topic.*:0", "--time", time);
|
||||
List<Row> expected = Arrays.asList(
|
||||
List<Row> expected = List.of(
|
||||
new Row("topic1", 0, 0L),
|
||||
new Row("topic2", 0, 0L),
|
||||
new Row("topic3", 0, 0L),
|
||||
|
@ -324,7 +323,7 @@ public class GetOffsetShellTest {
|
|||
// test topics disable remote log storage
|
||||
// as remote log disabled, broker return the same result as earliest offset
|
||||
TestUtils.waitForCondition(() ->
|
||||
Arrays.asList(
|
||||
List.of(
|
||||
new Row("topic1", 0, 0L),
|
||||
new Row("topic2", 0, 0L),
|
||||
new Row("topic3", 0, 0L),
|
||||
|
@ -334,7 +333,7 @@ public class GetOffsetShellTest {
|
|||
|
||||
// test topics enable remote log storage
|
||||
TestUtils.waitForCondition(() ->
|
||||
Arrays.asList(
|
||||
List.of(
|
||||
new Row("topicRLS1", 0, 0L),
|
||||
new Row("topicRLS2", 0, 1L),
|
||||
new Row("topicRLS3", 0, 2L),
|
||||
|
@ -353,13 +352,13 @@ public class GetOffsetShellTest {
|
|||
// test topics disable remote log storage
|
||||
// as remote log not enabled, broker return unknown offset for each topic partition and these
|
||||
// unknown offsets are ignored by GetOffsetShell hence we have empty result here.
|
||||
assertEquals(Collections.emptyList(),
|
||||
assertEquals(List.of(),
|
||||
executeAndParse("--topic-partitions", "topic\\d+:0", "--time", time));
|
||||
|
||||
// test topics enable remote log storage
|
||||
// topicRLS1 has no result because there's no log segments being uploaded to the remote storage
|
||||
TestUtils.waitForCondition(() ->
|
||||
Arrays.asList(
|
||||
List.of(
|
||||
new Row("topicRLS2", 0, 0L),
|
||||
new Row("topicRLS3", 0, 1L),
|
||||
new Row("topicRLS4", 0, 2L))
|
||||
|
@ -375,7 +374,7 @@ public class GetOffsetShellTest {
|
|||
String time = String.valueOf(System.currentTimeMillis() / 2);
|
||||
|
||||
List<Row> offsets = executeAndParse("--topic-partitions", "topic.*:0", "--time", time);
|
||||
List<Row> expected = Arrays.asList(
|
||||
List<Row> expected = List.of(
|
||||
new Row("topic1", 0, 0L),
|
||||
new Row("topic2", 0, 0L),
|
||||
new Row("topic3", 0, 0L),
|
||||
|
@ -401,7 +400,7 @@ public class GetOffsetShellTest {
|
|||
setUp();
|
||||
|
||||
List<Row> offsets = executeAndParse("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3", "--exclude-internal-topics");
|
||||
List<Row> expected = Arrays.asList(
|
||||
List<Row> expected = List.of(
|
||||
new Row("topic1", 0, 1L),
|
||||
new Row("topic2", 1, 2L),
|
||||
new Row("topic3", 2, 3L),
|
||||
|
@ -419,7 +418,7 @@ public class GetOffsetShellTest {
|
|||
|
||||
List<Row> offsets = executeAndParse("--topic-partitions", "__.*:0");
|
||||
|
||||
assertEquals(Arrays.asList(new Row("__consumer_offsets", 0, 0L)), offsets);
|
||||
assertEquals(List.of(new Row("__consumer_offsets", 0, 0L)), offsets);
|
||||
}
|
||||
|
||||
@ClusterTest
|
||||
|
@ -487,9 +486,9 @@ public class GetOffsetShellTest {
|
|||
private List<Row> expectedOffsetsWithInternal() {
|
||||
List<Row> consOffsets = IntStream.range(0, 4)
|
||||
.mapToObj(i -> new Row("__consumer_offsets", i, 0L))
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
|
||||
return Stream.concat(consOffsets.stream(), expectedTestTopicOffsets().stream()).collect(Collectors.toList());
|
||||
return Stream.concat(consOffsets.stream(), expectedTestTopicOffsets().stream()).toList();
|
||||
}
|
||||
|
||||
private List<Row> expectedTestTopicOffsets() {
|
||||
|
@ -505,7 +504,7 @@ public class GetOffsetShellTest {
|
|||
private List<Row> expectedOffsetsForTopic(int i) {
|
||||
String name = getTopicName(i);
|
||||
|
||||
return IntStream.range(0, i).mapToObj(p -> new Row(name, p, (long) i)).collect(Collectors.toList());
|
||||
return IntStream.range(0, i).mapToObj(p -> new Row(name, p, (long) i)).toList();
|
||||
}
|
||||
|
||||
private List<Row> executeAndParse(String... args) {
|
||||
|
@ -515,11 +514,11 @@ public class GetOffsetShellTest {
|
|||
.map(i -> i.split(":"))
|
||||
.filter(i -> i.length >= 2)
|
||||
.map(line -> new Row(line[0], Integer.parseInt(line[1]), (line.length == 2 || line[2].isEmpty()) ? null : Long.parseLong(line[2])))
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
}
|
||||
|
||||
private String[] addBootstrapServer(String... args) {
|
||||
ArrayList<String> newArgs = new ArrayList<>(Arrays.asList(args));
|
||||
ArrayList<String> newArgs = new ArrayList<>(List.of(args));
|
||||
newArgs.add("--bootstrap-server");
|
||||
newArgs.add(cluster.bootstrapServers());
|
||||
|
||||
|
|
|
@ -139,9 +139,9 @@ public class JmxToolTest {
|
|||
"--one-time"
|
||||
};
|
||||
String out = executeAndGetOut(args);
|
||||
Arrays.stream(out.split("\\r?\\n")).forEach(line -> {
|
||||
assertTrue(line.matches("([a-zA-Z0-9=:,.]+),\"([ -~]+)\""), line);
|
||||
});
|
||||
Arrays.stream(out.split("\\r?\\n")).forEach(line ->
|
||||
assertTrue(line.matches("([a-zA-Z0-9=:,.]+),\"([ -~]+)\""), line)
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -153,9 +153,9 @@ public class JmxToolTest {
|
|||
"--one-time"
|
||||
};
|
||||
String out = executeAndGetOut(args);
|
||||
Arrays.stream(out.split("\\r?\\n")).forEach(line -> {
|
||||
assertTrue(line.matches("([a-zA-Z0-9=:,.]+)\\t([ -~]+)"), line);
|
||||
});
|
||||
Arrays.stream(out.split("\\r?\\n")).forEach(line ->
|
||||
assertTrue(line.matches("([a-zA-Z0-9=:,.]+)\\t([ -~]+)"), line)
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -37,7 +37,6 @@ import java.nio.file.Files;
|
|||
import java.nio.file.Path;
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
|
@ -49,8 +48,6 @@ import java.util.concurrent.ExecutionException;
|
|||
|
||||
import scala.jdk.javaapi.CollectionConverters;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
|
@ -76,20 +73,20 @@ public class LeaderElectionCommandTest {
|
|||
public void testAllTopicPartition() throws InterruptedException, ExecutionException {
|
||||
String topic = "unclean-topic";
|
||||
int partition = 0;
|
||||
List<Integer> assignment = asList(broker2, broker3);
|
||||
List<Integer> assignment = List.of(broker2, broker3);
|
||||
|
||||
cluster.waitForReadyBrokers();
|
||||
try (Admin client = cluster.admin()) {
|
||||
|
||||
createTopic(client, topic, Collections.singletonMap(partition, assignment));
|
||||
createTopic(client, topic, Map.of(partition, assignment));
|
||||
|
||||
TopicPartition topicPartition = new TopicPartition(topic, partition);
|
||||
|
||||
TestUtils.assertLeader(client, topicPartition, broker2);
|
||||
cluster.shutdownBroker(broker3);
|
||||
TestUtils.waitForBrokersOutOfIsr(client,
|
||||
CollectionConverters.asScala(singletonList(topicPartition)).toSet(),
|
||||
CollectionConverters.asScala(singletonList(broker3)).toSet()
|
||||
CollectionConverters.asScala(List.of(topicPartition)).toSet(),
|
||||
CollectionConverters.asScala(List.of(broker3)).toSet()
|
||||
);
|
||||
cluster.shutdownBroker(broker2);
|
||||
TestUtils.assertNoLeader(client, topicPartition);
|
||||
|
@ -133,11 +130,11 @@ public class LeaderElectionCommandTest {
|
|||
public void testTopicPartition() throws InterruptedException, ExecutionException {
|
||||
String topic = "unclean-topic";
|
||||
int partition = 0;
|
||||
List<Integer> assignment = asList(broker2, broker3);
|
||||
List<Integer> assignment = List.of(broker2, broker3);
|
||||
|
||||
cluster.waitForReadyBrokers();
|
||||
try (Admin client = cluster.admin()) {
|
||||
createTopic(client, topic, Collections.singletonMap(partition, assignment));
|
||||
createTopic(client, topic, Map.of(partition, assignment));
|
||||
|
||||
TopicPartition topicPartition = new TopicPartition(topic, partition);
|
||||
|
||||
|
@ -145,8 +142,8 @@ public class LeaderElectionCommandTest {
|
|||
|
||||
cluster.shutdownBroker(broker3);
|
||||
TestUtils.waitForBrokersOutOfIsr(client,
|
||||
CollectionConverters.asScala(singletonList(topicPartition)).toSet(),
|
||||
CollectionConverters.asScala(singletonList(broker3)).toSet()
|
||||
CollectionConverters.asScala(List.of(topicPartition)).toSet(),
|
||||
CollectionConverters.asScala(List.of(broker3)).toSet()
|
||||
);
|
||||
cluster.shutdownBroker(broker2);
|
||||
TestUtils.assertNoLeader(client, topicPartition);
|
||||
|
@ -168,7 +165,7 @@ public class LeaderElectionCommandTest {
|
|||
public void testPathToJsonFile() throws Exception {
|
||||
String topic = "unclean-topic";
|
||||
int partition = 0;
|
||||
List<Integer> assignment = asList(broker2, broker3);
|
||||
List<Integer> assignment = List.of(broker2, broker3);
|
||||
|
||||
cluster.waitForReadyBrokers();
|
||||
Map<Integer, List<Integer>> partitionAssignment = new HashMap<>();
|
||||
|
@ -183,15 +180,15 @@ public class LeaderElectionCommandTest {
|
|||
|
||||
cluster.shutdownBroker(broker3);
|
||||
TestUtils.waitForBrokersOutOfIsr(client,
|
||||
CollectionConverters.asScala(singletonList(topicPartition)).toSet(),
|
||||
CollectionConverters.asScala(singletonList(broker3)).toSet()
|
||||
CollectionConverters.asScala(List.of(topicPartition)).toSet(),
|
||||
CollectionConverters.asScala(List.of(broker3)).toSet()
|
||||
);
|
||||
cluster.shutdownBroker(broker2);
|
||||
TestUtils.assertNoLeader(client, topicPartition);
|
||||
cluster.startBroker(broker3);
|
||||
TestUtils.waitForOnlineBroker(client, broker3);
|
||||
|
||||
Path topicPartitionPath = tempTopicPartitionFile(singletonList(topicPartition));
|
||||
Path topicPartitionPath = tempTopicPartitionFile(List.of(topicPartition));
|
||||
|
||||
assertEquals(0, LeaderElectionCommand.mainNoExit(
|
||||
"--bootstrap-server", cluster.bootstrapServers(),
|
||||
|
@ -207,7 +204,7 @@ public class LeaderElectionCommandTest {
|
|||
public void testPreferredReplicaElection() throws InterruptedException, ExecutionException {
|
||||
String topic = "preferred-topic";
|
||||
int partition = 0;
|
||||
List<Integer> assignment = asList(broker2, broker3);
|
||||
List<Integer> assignment = List.of(broker2, broker3);
|
||||
|
||||
cluster.waitForReadyBrokers();
|
||||
try (Admin client = cluster.admin()) {
|
||||
|
@ -224,7 +221,7 @@ public class LeaderElectionCommandTest {
|
|||
TestUtils.assertLeader(client, topicPartition, broker3);
|
||||
cluster.startBroker(broker2);
|
||||
TestUtils.waitForBrokersInIsr(client, topicPartition,
|
||||
CollectionConverters.asScala(singletonList(broker2)).toSet()
|
||||
CollectionConverters.asScala(List.of(broker2)).toSet()
|
||||
);
|
||||
|
||||
assertEquals(0, LeaderElectionCommand.mainNoExit(
|
||||
|
@ -254,8 +251,8 @@ public class LeaderElectionCommandTest {
|
|||
String topic = "non-preferred-topic";
|
||||
int partition0 = 0;
|
||||
int partition1 = 1;
|
||||
List<Integer> assignment0 = asList(broker2, broker3);
|
||||
List<Integer> assignment1 = asList(broker3, broker2);
|
||||
List<Integer> assignment0 = List.of(broker2, broker3);
|
||||
List<Integer> assignment1 = List.of(broker3, broker2);
|
||||
|
||||
cluster.waitForReadyBrokers();
|
||||
TopicPartition topicPartition0;
|
||||
|
@ -277,14 +274,14 @@ public class LeaderElectionCommandTest {
|
|||
TestUtils.assertLeader(client, topicPartition0, broker3);
|
||||
cluster.startBroker(broker2);
|
||||
TestUtils.waitForBrokersInIsr(client, topicPartition0,
|
||||
CollectionConverters.asScala(singletonList(broker2)).toSet()
|
||||
CollectionConverters.asScala(List.of(broker2)).toSet()
|
||||
);
|
||||
TestUtils.waitForBrokersInIsr(client, topicPartition1,
|
||||
CollectionConverters.asScala(singletonList(broker2)).toSet()
|
||||
CollectionConverters.asScala(List.of(broker2)).toSet()
|
||||
);
|
||||
}
|
||||
|
||||
Path topicPartitionPath = tempTopicPartitionFile(asList(topicPartition0, topicPartition1));
|
||||
Path topicPartitionPath = tempTopicPartitionFile(List.of(topicPartition0, topicPartition1));
|
||||
String output = ToolsTestUtils.captureStandardOut(() ->
|
||||
LeaderElectionCommand.mainNoExit(
|
||||
"--bootstrap-server", cluster.bootstrapServers(),
|
||||
|
@ -308,7 +305,7 @@ public class LeaderElectionCommandTest {
|
|||
|
||||
private void createTopic(Admin admin, String topic, Map<Integer, List<Integer>> replicaAssignment) throws ExecutionException, InterruptedException {
|
||||
NewTopic newTopic = new NewTopic(topic, replicaAssignment);
|
||||
List<NewTopic> newTopics = singletonList(newTopic);
|
||||
List<NewTopic> newTopics = List.of(newTopic);
|
||||
CreateTopicsResult createTopicResult = admin.createTopics(newTopics);
|
||||
createTopicResult.all().get();
|
||||
}
|
||||
|
|
|
@ -25,15 +25,12 @@ import org.apache.kafka.tools.api.RecordReader;
|
|||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Properties;
|
||||
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static java.util.Arrays.asList;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static org.apache.kafka.common.utils.Utils.propsToStringMap;
|
||||
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
@ -57,7 +54,7 @@ public class LineMessageReaderTest {
|
|||
ProducerRecord<String, String> expected = record(
|
||||
"key0",
|
||||
"value0",
|
||||
asList(
|
||||
List.of(
|
||||
new RecordHeader("headerKey0", "headerValue0".getBytes(UTF_8)),
|
||||
new RecordHeader("headerKey1", "headerValue1".getBytes(UTF_8))
|
||||
)
|
||||
|
@ -67,7 +64,7 @@ public class LineMessageReaderTest {
|
|||
|
||||
@Test
|
||||
public void testMinimalValidInputWithHeaderKeyAndValue() {
|
||||
runTest(defaultTestProps(), ":\t\t", record("", "", singletonList(new RecordHeader("", "".getBytes(UTF_8)))));
|
||||
runTest(defaultTestProps(), ":\t\t", record("", "", List.of(new RecordHeader("", "".getBytes(UTF_8)))));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -90,7 +87,7 @@ public class LineMessageReaderTest {
|
|||
"headerKey0.0::::headerValue0.0---headerKey1.0::::\t\tkey\t\tvalue",
|
||||
record("key",
|
||||
"value",
|
||||
asList(
|
||||
List.of(
|
||||
new RecordHeader("headerKey0.0", "headerValue0.0".getBytes(UTF_8)),
|
||||
new RecordHeader("headerKey1.0", "".getBytes(UTF_8))
|
||||
)
|
||||
|
@ -105,7 +102,7 @@ public class LineMessageReaderTest {
|
|||
Properties props = defaultTestProps();
|
||||
props.put("parse.key", "false");
|
||||
|
||||
runTest(props, input, record(null, "value", singletonList(new RecordHeader("headerKey", "headerValue".getBytes(UTF_8)))));
|
||||
runTest(props, input, record(null, "value", List.of(new RecordHeader("headerKey", "headerValue".getBytes(UTF_8)))));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -132,7 +129,7 @@ public class LineMessageReaderTest {
|
|||
ProducerRecord<String, String> record0 = record(
|
||||
"key0",
|
||||
"value0",
|
||||
asList(
|
||||
List.of(
|
||||
new RecordHeader("headerKey0.0", "headerValue0.0".getBytes(UTF_8)),
|
||||
new RecordHeader("headerKey0.1", "headerValue0.1".getBytes(UTF_8))
|
||||
)
|
||||
|
@ -140,7 +137,7 @@ public class LineMessageReaderTest {
|
|||
ProducerRecord<String, String> record1 = record(
|
||||
"key1",
|
||||
"value1",
|
||||
singletonList(new RecordHeader("headerKey1.0", "headerValue1.0".getBytes(UTF_8)))
|
||||
List.of(new RecordHeader("headerKey1.0", "headerValue1.0".getBytes(UTF_8)))
|
||||
);
|
||||
|
||||
runTest(props, input, record0, record1);
|
||||
|
@ -208,12 +205,12 @@ public class LineMessageReaderTest {
|
|||
props.put("ignore.error", "true");
|
||||
|
||||
ProducerRecord<String, String> validRecord = record("key0", "value0",
|
||||
singletonList(new RecordHeader("headerKey0.0", "headerValue0.0".getBytes(UTF_8))));
|
||||
List.of(new RecordHeader("headerKey0.0", "headerValue0.0".getBytes(UTF_8))));
|
||||
|
||||
ProducerRecord<String, String> missingHeaderDelimiter = record(
|
||||
null,
|
||||
"value1",
|
||||
asList(
|
||||
List.of(
|
||||
new RecordHeader("headerKey1.0", "headerValue1.0".getBytes(UTF_8)),
|
||||
new RecordHeader("headerKey1.1", "headerValue1.1[MISSING-HEADER-DELIMITER]key1".getBytes(UTF_8))
|
||||
)
|
||||
|
@ -222,13 +219,13 @@ public class LineMessageReaderTest {
|
|||
ProducerRecord<String, String> missingKeyDelimiter = record(
|
||||
null,
|
||||
"key2[MISSING-KEY-DELIMITER]value2",
|
||||
singletonList(new RecordHeader("headerKey2.0", "headerValue2.0".getBytes(UTF_8)))
|
||||
List.of(new RecordHeader("headerKey2.0", "headerValue2.0".getBytes(UTF_8)))
|
||||
);
|
||||
|
||||
ProducerRecord<String, String> missingKeyHeaderDelimiter = record(
|
||||
null,
|
||||
"headerKey3.0:headerValue3.0[MISSING-HEADER-DELIMITER]key3[MISSING-KEY-DELIMITER]value3",
|
||||
Collections.emptyList()
|
||||
List.of()
|
||||
);
|
||||
|
||||
runTest(props, input, validRecord, missingHeaderDelimiter, missingKeyDelimiter, missingKeyHeaderDelimiter);
|
||||
|
@ -241,7 +238,7 @@ public class LineMessageReaderTest {
|
|||
Properties props = defaultTestProps();
|
||||
props.put("ignore.error", "true");
|
||||
|
||||
ProducerRecord<String, String> expected = record("key0", "value0", singletonList(new RecordHeader("key-val", null)));
|
||||
ProducerRecord<String, String> expected = record("key0", "value0", List.of(new RecordHeader("key-val", null)));
|
||||
|
||||
runTest(props, input, expected);
|
||||
}
|
||||
|
@ -288,11 +285,11 @@ public class LineMessageReaderTest {
|
|||
Properties props = defaultTestProps();
|
||||
props.put("null.marker", "<NULL>");
|
||||
runTest(props, input,
|
||||
record(null, "value", asList(new RecordHeader("h0", "v0".getBytes(UTF_8)), header)),
|
||||
record(null, "value", List.of(new RecordHeader("h0", "v0".getBytes(UTF_8)), header)),
|
||||
record("key", null),
|
||||
record(null, null, asList(new RecordHeader("h0", "".getBytes(UTF_8)), header)),
|
||||
record("key", null, asList(new RecordHeader("h0", null), header)),
|
||||
record("key", null, asList(new RecordHeader("h0", null), new RecordHeader("h1", "<NULL>value".getBytes(UTF_8))))
|
||||
record(null, null, List.of(new RecordHeader("h0", "".getBytes(UTF_8)), header)),
|
||||
record("key", null, List.of(new RecordHeader("h0", null), header)),
|
||||
record("key", null, List.of(new RecordHeader("h0", null), new RecordHeader("h1", "<NULL>value".getBytes(UTF_8))))
|
||||
);
|
||||
|
||||
// If the null marker is not set
|
||||
|
@ -300,16 +297,16 @@ public class LineMessageReaderTest {
|
|||
props.remove("null.marker");
|
||||
lineReader.configure(propsToStringMap(props));
|
||||
Iterator<ProducerRecord<byte[], byte[]>> iter = lineReader.readRecords(new ByteArrayInputStream(input.getBytes()));
|
||||
assertRecordEquals(record("<NULL>", "value", asList(new RecordHeader("h0", "v0".getBytes(UTF_8)), header)), iter.next());
|
||||
assertRecordEquals(record("<NULL>", "value", List.of(new RecordHeader("h0", "v0".getBytes(UTF_8)), header)), iter.next());
|
||||
// line 2 is not valid anymore
|
||||
KafkaException expectedException = assertThrows(KafkaException.class, iter::next);
|
||||
assertEquals(
|
||||
"No header key separator found in pair '<NULL>' on line number 2",
|
||||
expectedException.getMessage()
|
||||
);
|
||||
assertRecordEquals(record("<NULL>", "<NULL>", asList(new RecordHeader("h0", "".getBytes(UTF_8)), header)), iter.next());
|
||||
assertRecordEquals(record("key", "<NULL>", asList(new RecordHeader("h0", "<NULL>".getBytes(UTF_8)), header)), iter.next());
|
||||
assertRecordEquals(record("key", "<NULL>", asList(
|
||||
assertRecordEquals(record("<NULL>", "<NULL>", List.of(new RecordHeader("h0", "".getBytes(UTF_8)), header)), iter.next());
|
||||
assertRecordEquals(record("key", "<NULL>", List.of(new RecordHeader("h0", "<NULL>".getBytes(UTF_8)), header)), iter.next());
|
||||
assertRecordEquals(record("key", "<NULL>", List.of(
|
||||
new RecordHeader("h0", "<NULL>".getBytes(UTF_8)),
|
||||
new RecordHeader("h1", "<NULL>value".getBytes(UTF_8)))), iter.next()
|
||||
);
|
||||
|
@ -332,7 +329,7 @@ public class LineMessageReaderTest {
|
|||
|
||||
// If the null marker is not set
|
||||
props.remove("null.marker");
|
||||
runTest(props, input, record("key", "value", asList(
|
||||
runTest(props, input, record("key", "value", List.of(
|
||||
new RecordHeader("<NULL>", "v0".getBytes(UTF_8)),
|
||||
new RecordHeader("h1", "v1".getBytes(UTF_8))))
|
||||
);
|
||||
|
|
|
@ -31,8 +31,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
|
|||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -53,7 +51,7 @@ public class LogDirsCommandTest {
|
|||
@ClusterTest(brokers = 3)
|
||||
public void testLogDirsWithoutBrokers(ClusterInstance clusterInstance) {
|
||||
createTopic(clusterInstance, TOPIC);
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
String output = assertDoesNotThrow(() -> execute(fromArgsToOptions("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe"), admin));
|
||||
|
||||
// check all brokers are present
|
||||
|
@ -73,7 +71,7 @@ public class LogDirsCommandTest {
|
|||
@ClusterTest(brokers = 3)
|
||||
public void testLogDirsWithBrokers(ClusterInstance clusterInstance) {
|
||||
createTopic(clusterInstance, TOPIC);
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
int brokerId = 0;
|
||||
String output = assertDoesNotThrow(() -> execute(fromArgsToOptions("--bootstrap-server", clusterInstance.bootstrapServers(), "--broker-list", String.valueOf(brokerId), "--describe"), admin));
|
||||
|
||||
|
@ -82,7 +80,7 @@ public class LogDirsCommandTest {
|
|||
clusterInstance.brokerIds().stream().filter(id -> id != brokerId).forEach(id -> assertFalse(output.contains("\"broker\":" + id)));
|
||||
|
||||
// check log dir and topic partition are present
|
||||
Map<Integer, Map<String, LogDirDescription>> logDirs = assertDoesNotThrow(() -> admin.describeLogDirs(Collections.singleton(brokerId)).allDescriptions().get());
|
||||
Map<Integer, Map<String, LogDirDescription>> logDirs = assertDoesNotThrow(() -> admin.describeLogDirs(Set.of(brokerId)).allDescriptions().get());
|
||||
assertEquals(1, logDirs.size());
|
||||
logDirs.forEach((brokerIdValue, logDirInfo) -> {
|
||||
assertFalse(logDirInfo.isEmpty());
|
||||
|
@ -98,7 +96,7 @@ public class LogDirsCommandTest {
|
|||
|
||||
@ClusterTest
|
||||
public void testLogDirsWithNonExistentTopic(ClusterInstance clusterInstance) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
String output = assertDoesNotThrow(() -> execute(fromArgsToOptions("--bootstrap-server", clusterInstance.bootstrapServers(), "--topic-list", TOPIC, "--describe"), admin));
|
||||
// check all brokers are present
|
||||
clusterInstance.brokerIds().forEach(brokerId -> assertTrue(output.contains("\"broker\":" + brokerId)));
|
||||
|
@ -109,9 +107,9 @@ public class LogDirsCommandTest {
|
|||
logDirs.forEach((brokerId, logDirInfo) ->
|
||||
logDirInfo.forEach((logDir, logDirInfoValue) -> {
|
||||
assertTrue(output.contains("\"logDir\":\"" + logDir + "\""));
|
||||
logDirInfoValue.replicaInfos().forEach((topicPartition, replicaInfo) -> {
|
||||
assertFalse(output.contains("\"partition\":\"" + topicPartition + "\""));
|
||||
});
|
||||
logDirInfoValue.replicaInfos().forEach((topicPartition, replicaInfo) ->
|
||||
assertFalse(output.contains("\"partition\":\"" + topicPartition + "\""))
|
||||
);
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
@ -120,7 +118,7 @@ public class LogDirsCommandTest {
|
|||
public void testLogDirsWithSpecificTopic(ClusterInstance clusterInstance) {
|
||||
createTopic(clusterInstance, TOPIC);
|
||||
createTopic(clusterInstance, "other-topic");
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
String output = assertDoesNotThrow(() -> execute(fromArgsToOptions("--bootstrap-server", clusterInstance.bootstrapServers(), "--topic-list", TOPIC, "--describe"), admin));
|
||||
// check all brokers are present
|
||||
clusterInstance.brokerIds().forEach(brokerId -> assertTrue(output.contains("\"broker\":" + brokerId)));
|
||||
|
@ -133,9 +131,9 @@ public class LogDirsCommandTest {
|
|||
logDirs.forEach((brokerId, logDirInfo) ->
|
||||
logDirInfo.forEach((logDir, logDirInfoValue) -> {
|
||||
assertTrue(output.contains("\"logDir\":\"" + logDir + "\""));
|
||||
logDirInfoValue.replicaInfos().keySet().stream().filter(tp -> !tp.topic().equals(TOPIC)).forEach(tp -> {
|
||||
assertFalse(output.contains("\"partition\":\"" + tp + "\""));
|
||||
});
|
||||
logDirInfoValue.replicaInfos().keySet().stream().filter(tp -> !tp.topic().equals(TOPIC)).forEach(tp ->
|
||||
assertFalse(output.contains("\"partition\":\"" + tp + "\""))
|
||||
);
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
@ -143,7 +141,7 @@ public class LogDirsCommandTest {
|
|||
@Test
|
||||
public void shouldThrowWhenQueryingNonExistentBrokers() {
|
||||
Node broker = new Node(1, "hostname", 9092);
|
||||
try (MockAdminClient adminClient = new MockAdminClient(Collections.singletonList(broker), broker)) {
|
||||
try (MockAdminClient adminClient = new MockAdminClient(List.of(broker), broker)) {
|
||||
RuntimeException exception = assertThrows(RuntimeException.class, () -> execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--broker-list", "0,1,2", "--describe"), adminClient));
|
||||
assertNotNull(exception.getCause());
|
||||
assertEquals(TerseException.class, exception.getCause().getClass());
|
||||
|
@ -155,7 +153,7 @@ public class LogDirsCommandTest {
|
|||
@SuppressWarnings("unchecked")
|
||||
public void shouldNotThrowWhenDuplicatedBrokers() throws JsonProcessingException {
|
||||
Node broker = new Node(1, "hostname", 9092);
|
||||
try (MockAdminClient adminClient = new MockAdminClient(Collections.singletonList(broker), broker)) {
|
||||
try (MockAdminClient adminClient = new MockAdminClient(List.of(broker), broker)) {
|
||||
String standardOutput = execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--broker-list", "1,1", "--describe"), adminClient);
|
||||
String[] standardOutputLines = standardOutput.split("\n");
|
||||
assertEquals(3, standardOutputLines.length);
|
||||
|
@ -172,13 +170,13 @@ public class LogDirsCommandTest {
|
|||
public void shouldQueryAllBrokersIfNonSpecified() throws JsonProcessingException {
|
||||
Node brokerOne = new Node(1, "hostname", 9092);
|
||||
Node brokerTwo = new Node(2, "hostname", 9092);
|
||||
try (MockAdminClient adminClient = new MockAdminClient(Arrays.asList(brokerTwo, brokerOne), brokerOne)) {
|
||||
try (MockAdminClient adminClient = new MockAdminClient(List.of(brokerTwo, brokerOne), brokerOne)) {
|
||||
String standardOutput = execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--describe"), adminClient);
|
||||
String[] standardOutputLines = standardOutput.split("\n");
|
||||
assertEquals(3, standardOutputLines.length);
|
||||
Map<String, Object> information = new ObjectMapper().readValue(standardOutputLines[2], HashMap.class);
|
||||
List<Object> brokersInformation = (List<Object>) information.get("brokers");
|
||||
Set<Integer> brokerIds = new HashSet<Integer>() {{
|
||||
Set<Integer> brokerIds = new HashSet<>() {{
|
||||
add((Integer) ((HashMap<String, Object>) brokersInformation.get(0)).get("broker"));
|
||||
add((Integer) ((HashMap<String, Object>) brokersInformation.get(1)).get("broker"));
|
||||
}};
|
||||
|
@ -192,7 +190,7 @@ public class LogDirsCommandTest {
|
|||
public void shouldQuerySpecifiedBroker() throws JsonProcessingException {
|
||||
Node brokerOne = new Node(1, "hostname", 9092);
|
||||
Node brokerTwo = new Node(2, "hostname", 9092);
|
||||
try (MockAdminClient adminClient = new MockAdminClient(Arrays.asList(brokerOne, brokerTwo), brokerOne)) {
|
||||
try (MockAdminClient adminClient = new MockAdminClient(List.of(brokerOne, brokerTwo), brokerOne)) {
|
||||
String standardOutput = execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--broker-list", "1", "--describe"), adminClient);
|
||||
String[] standardOutputLines = standardOutput.split("\n");
|
||||
assertEquals(3, standardOutputLines.length);
|
||||
|
@ -220,8 +218,8 @@ public class LogDirsCommandTest {
|
|||
}
|
||||
|
||||
private void createTopic(ClusterInstance clusterInstance, String topic) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
assertDoesNotThrow(() -> admin.createTopics(Collections.singletonList(new NewTopic(topic, Collections.singletonMap(0, Collections.singletonList(0))))).topicId(topic).get());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
assertDoesNotThrow(() -> admin.createTopics(List.of(new NewTopic(topic, Map.of(0, List.of(0))))).topicId(topic).get());
|
||||
assertDoesNotThrow(() -> clusterInstance.waitTopicCreation(topic, 1));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,7 +30,6 @@ import java.io.IOException;
|
|||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
|
@ -54,7 +53,7 @@ class MetadataQuorumCommandTest {
|
|||
MetadataQuorumCommand.mainNoExit("--bootstrap-server", cluster.bootstrapServers(), "describe", "--replication")
|
||||
);
|
||||
|
||||
List<String> outputs = Arrays.stream(describeOutput.split("\n")).collect(Collectors.toList());
|
||||
List<String> outputs = Arrays.stream(describeOutput.split("\n")).toList();
|
||||
String header = outputs.get(0);
|
||||
List<String> data = outputs.subList(1, outputs.size());
|
||||
|
||||
|
|
|
@ -28,7 +28,6 @@ import org.junit.jupiter.api.Test;
|
|||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Properties;
|
||||
|
@ -41,7 +40,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
|
|||
public class MetadataQuorumCommandUnitTest {
|
||||
@Test
|
||||
public void testRemoveControllerDryRun() {
|
||||
List<String> outputs = Arrays.asList(
|
||||
List<String> outputs = List.of(
|
||||
ToolsTestUtils.captureStandardOut(() ->
|
||||
assertEquals(0, MetadataQuorumCommand.mainNoExit("--bootstrap-server", "localhost:9092",
|
||||
"remove-controller",
|
||||
|
@ -250,7 +249,7 @@ public class MetadataQuorumCommandUnitTest {
|
|||
new MetadataQuorumCommandUnitTestEnv(Optional.
|
||||
of(Uuid.fromString("wZoXPqWoSu6F6c8MkmdyAg")))) {
|
||||
File propsFile = testEnv.writePropertiesFile();
|
||||
List<String> outputs = Arrays.asList(
|
||||
List<String> outputs = List.of(
|
||||
ToolsTestUtils.captureStandardOut(() ->
|
||||
assertEquals(0, MetadataQuorumCommand.mainNoExit("--bootstrap-server", "localhost:9092",
|
||||
"--command-config", propsFile.getAbsolutePath(),
|
||||
|
|
|
@ -38,7 +38,6 @@ import java.io.IOException;
|
|||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.SplittableRandom;
|
||||
|
@ -94,7 +93,7 @@ public class ProducerPerformanceTest {
|
|||
|
||||
@Test
|
||||
public void testReadProps() throws Exception {
|
||||
List<String> producerProps = Collections.singletonList("bootstrap.servers=localhost:9000");
|
||||
List<String> producerProps = List.of("bootstrap.servers=localhost:9000");
|
||||
File producerConfig = createTempFile("acks=1");
|
||||
|
||||
Properties prop = ProducerPerformance.readProps(producerProps, producerConfig.getAbsolutePath());
|
||||
|
@ -360,7 +359,7 @@ public class ProducerPerformanceTest {
|
|||
|
||||
@Test
|
||||
public void testClientIdOverride() throws Exception {
|
||||
List<String> producerProps = Collections.singletonList("client.id=producer-1");
|
||||
List<String> producerProps = List.of("client.id=producer-1");
|
||||
|
||||
Properties prop = ProducerPerformance.readProps(producerProps, null);
|
||||
|
||||
|
@ -370,7 +369,7 @@ public class ProducerPerformanceTest {
|
|||
|
||||
@Test
|
||||
public void testDefaultClientId() throws Exception {
|
||||
List<String> producerProps = Collections.singletonList("acks=1");
|
||||
List<String> producerProps = List.of("acks=1");
|
||||
|
||||
Properties prop = ProducerPerformance.readProps(producerProps, null);
|
||||
|
||||
|
@ -391,9 +390,7 @@ public class ProducerPerformanceTest {
|
|||
ProducerPerformance.Stats stats = new ProducerPerformance.Stats(numRecords, false);
|
||||
for (long i = 0; i < numRecords; i++) {
|
||||
final Callback callback = new ProducerPerformance.PerfCallback(0, 100, stats, null);
|
||||
CompletableFuture.runAsync(() -> {
|
||||
callback.onCompletion(null, null);
|
||||
}, singleThreaded);
|
||||
CompletableFuture.runAsync(() -> callback.onCompletion(null, null), singleThreaded);
|
||||
}
|
||||
|
||||
singleThreaded.shutdown();
|
||||
|
|
|
@ -43,8 +43,7 @@ import java.net.InetAddress;
|
|||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@ -186,41 +185,41 @@ public class PushHttpMetricsReporterTest {
|
|||
verifyConfigure();
|
||||
KafkaMetric metric1 = new KafkaMetric(
|
||||
new Object(),
|
||||
new MetricName("name1", "group1", "desc1", Collections.singletonMap("key1", "value1")),
|
||||
new MetricName("name1", "group1", "desc1", Map.of("key1", "value1")),
|
||||
new ImmutableValue<>(1.0),
|
||||
null,
|
||||
time
|
||||
);
|
||||
KafkaMetric newMetric1 = new KafkaMetric(
|
||||
new Object(),
|
||||
new MetricName("name1", "group1", "desc1", Collections.singletonMap("key1", "value1")),
|
||||
new MetricName("name1", "group1", "desc1", Map.of("key1", "value1")),
|
||||
new ImmutableValue<>(-1.0),
|
||||
null,
|
||||
time
|
||||
);
|
||||
KafkaMetric metric2 = new KafkaMetric(
|
||||
new Object(),
|
||||
new MetricName("name2", "group2", "desc2", Collections.singletonMap("key2", "value2")),
|
||||
new MetricName("name2", "group2", "desc2", Map.of("key2", "value2")),
|
||||
new ImmutableValue<>(2.0),
|
||||
null,
|
||||
time
|
||||
);
|
||||
KafkaMetric metric3 = new KafkaMetric(
|
||||
new Object(),
|
||||
new MetricName("name3", "group3", "desc3", Collections.singletonMap("key3", "value3")),
|
||||
new MetricName("name3", "group3", "desc3", Map.of("key3", "value3")),
|
||||
new ImmutableValue<>(3.0),
|
||||
null,
|
||||
time
|
||||
);
|
||||
KafkaMetric metric4 = new KafkaMetric(
|
||||
new Object(),
|
||||
new MetricName("name4", "group4", "desc4", Collections.singletonMap("key4", "value4")),
|
||||
new MetricName("name4", "group4", "desc4", Map.of("key4", "value4")),
|
||||
new ImmutableValue<>("value4"),
|
||||
null,
|
||||
time
|
||||
);
|
||||
|
||||
reporter.init(Arrays.asList(metric1, metric2, metric4));
|
||||
reporter.init(List.of(metric1, metric2, metric4));
|
||||
reporter.metricChange(newMetric1); // added in init, modified
|
||||
reporter.metricChange(metric3); // added by change
|
||||
reporter.metricRemoval(metric2); // added in init, deleted by removal
|
||||
|
@ -236,7 +235,7 @@ public class PushHttpMetricsReporterTest {
|
|||
JsonNode metrics = payload.get("metrics");
|
||||
assertTrue(metrics.isArray());
|
||||
assertEquals(3, metrics.size());
|
||||
List<JsonNode> metricsList = Arrays.asList(metrics.get(0), metrics.get(1), metrics.get(2));
|
||||
List<JsonNode> metricsList = new ArrayList<>(List.of(metrics.get(0), metrics.get(1), metrics.get(2)));
|
||||
// Sort metrics based on name so that we can verify the value for each metric below
|
||||
metricsList.sort(Comparator.comparing(m -> m.get("name").textValue()));
|
||||
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.apache.kafka.common.record.SimpleRecord;
|
|||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.stream.IntStream;
|
||||
|
@ -43,8 +42,8 @@ public class ReplicaVerificationToolTest {
|
|||
}};
|
||||
|
||||
ReplicaVerificationTool.ReplicaBuffer replicaBuffer =
|
||||
new ReplicaVerificationTool.ReplicaBuffer(expectedReplicasPerTopicAndPartition, Collections.emptyMap(), 2, 0);
|
||||
expectedReplicasPerTopicAndPartition.forEach((tp, numReplicas) -> {
|
||||
new ReplicaVerificationTool.ReplicaBuffer(expectedReplicasPerTopicAndPartition, Map.of(), 2, 0);
|
||||
expectedReplicasPerTopicAndPartition.forEach((tp, numReplicas) ->
|
||||
IntStream.range(0, numReplicas).forEach(replicaId -> {
|
||||
SimpleRecord[] records = IntStream.rangeClosed(0, 5)
|
||||
.mapToObj(index -> new SimpleRecord(("key " + index).getBytes(), ("value " + index).getBytes()))
|
||||
|
@ -60,8 +59,8 @@ public class ReplicaVerificationToolTest {
|
|||
.setRecords(memoryRecords);
|
||||
|
||||
replicaBuffer.addFetchedData(tp, replicaId, partitionData);
|
||||
});
|
||||
});
|
||||
})
|
||||
);
|
||||
|
||||
replicaBuffer.verifyCheckSum(line -> sb.append(format("%s%n", line)));
|
||||
String output = sb.toString().trim();
|
||||
|
|
|
@ -33,9 +33,9 @@ import org.junit.jupiter.api.Timeout;
|
|||
|
||||
import java.time.Duration;
|
||||
import java.time.Instant;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
@ -53,7 +53,7 @@ public class StreamsResetterTest {
|
|||
|
||||
@BeforeEach
|
||||
public void beforeEach() {
|
||||
consumer.assign(Collections.singletonList(topicPartition));
|
||||
consumer.assign(List.of(topicPartition));
|
||||
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0L, new byte[] {}, new byte[] {}));
|
||||
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1L, new byte[] {}, new byte[] {}));
|
||||
consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 2L, new byte[] {}, new byte[] {}));
|
||||
|
@ -82,7 +82,7 @@ public class StreamsResetterTest {
|
|||
final long beginningOffset = 5L;
|
||||
final long endOffset = 10L;
|
||||
final MockConsumer<byte[], byte[]> emptyConsumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name());
|
||||
emptyConsumer.assign(Collections.singletonList(topicPartition));
|
||||
emptyConsumer.assign(List.of(topicPartition));
|
||||
|
||||
final Map<TopicPartition, Long> beginningOffsetsMap = new HashMap<>();
|
||||
beginningOffsetsMap.put(topicPartition, beginningOffset);
|
||||
|
@ -255,10 +255,10 @@ public class StreamsResetterTest {
|
|||
public void shouldDeleteTopic() throws InterruptedException, ExecutionException {
|
||||
final Cluster cluster = createCluster(1);
|
||||
try (final MockAdminClient adminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
|
||||
final TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
|
||||
adminClient.addTopic(false, TOPIC, Collections.singletonList(topicPartitionInfo), null);
|
||||
streamsResetter.doDelete(Collections.singletonList(TOPIC), adminClient);
|
||||
assertEquals(Collections.emptySet(), adminClient.listTopics().names().get());
|
||||
final TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
|
||||
adminClient.addTopic(false, TOPIC, List.of(topicPartitionInfo), null);
|
||||
streamsResetter.doDelete(List.of(TOPIC), adminClient);
|
||||
assertEquals(Set.of(), adminClient.listTopics().names().get());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -274,7 +274,7 @@ public class StreamsResetterTest {
|
|||
public void testResetToDatetimeWhenPartitionIsEmptyResetsToLatestOffset() {
|
||||
final long beginningAndEndOffset = 5L; // Empty partition implies beginning offset == end offset
|
||||
final MockConsumer<byte[], byte[]> emptyConsumer = new EmptyPartitionConsumer<>(AutoOffsetResetStrategy.EARLIEST.name());
|
||||
emptyConsumer.assign(Collections.singletonList(topicPartition));
|
||||
emptyConsumer.assign(List.of(topicPartition));
|
||||
|
||||
final Map<TopicPartition, Long> beginningOffsetsMap = new HashMap<>();
|
||||
beginningOffsetsMap.put(topicPartition, beginningAndEndOffset);
|
||||
|
@ -299,8 +299,8 @@ public class StreamsResetterTest {
|
|||
nodes.put(i, new Node(i, "localhost", 8121 + i));
|
||||
}
|
||||
return new Cluster("mockClusterId", nodes.values(),
|
||||
Collections.emptySet(), Collections.emptySet(),
|
||||
Collections.emptySet(), nodes.get(0));
|
||||
Set.of(), Set.of(),
|
||||
Set.of(), nodes.get(0));
|
||||
}
|
||||
|
||||
private static class EmptyPartitionConsumer<K, V> extends MockConsumer<K, V> {
|
||||
|
|
|
@ -30,7 +30,6 @@ import java.io.IOException;
|
|||
import java.io.PrintStream;
|
||||
import java.util.AbstractMap.SimpleImmutableEntry;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@ -146,7 +145,7 @@ public class ToolsTestUtils {
|
|||
public static void removePartitionReplicaThrottles(Admin adminClient, Set<TopicPartition> partitions) throws ExecutionException, InterruptedException {
|
||||
Map<ConfigResource, Collection<AlterConfigOp>> throttles = partitions.stream().collect(Collectors.toMap(
|
||||
tp -> new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()),
|
||||
tp -> Arrays.asList(
|
||||
tp -> List.of(
|
||||
new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, ""),
|
||||
AlterConfigOp.OpType.DELETE),
|
||||
new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_REPLICAS_CONFIG, ""),
|
||||
|
|
|
@ -110,7 +110,7 @@ public class TopicCommandTest {
|
|||
|
||||
@Test
|
||||
public void testIsNotUnderReplicatedWhenAdding() {
|
||||
List<Integer> replicaIds = Arrays.asList(1, 2);
|
||||
List<Integer> replicaIds = List.of(1, 2);
|
||||
List<Node> replicas = new ArrayList<>();
|
||||
for (int id : replicaIds) {
|
||||
replicas.add(new Node(id, "localhost", 9090 + id));
|
||||
|
@ -118,9 +118,9 @@ public class TopicCommandTest {
|
|||
|
||||
TopicCommand.PartitionDescription partitionDescription = new TopicCommand.PartitionDescription("test-topic",
|
||||
new TopicPartitionInfo(0, new Node(1, "localhost", 9091), replicas,
|
||||
Collections.singletonList(new Node(1, "localhost", 9091))),
|
||||
List.of(new Node(1, "localhost", 9091))),
|
||||
null, false,
|
||||
new PartitionReassignment(replicaIds, Arrays.asList(2), Collections.emptyList())
|
||||
new PartitionReassignment(replicaIds, List.of(2), List.of())
|
||||
);
|
||||
|
||||
assertFalse(partitionDescription.isUnderReplicated());
|
||||
|
@ -236,9 +236,9 @@ public class TopicCommandTest {
|
|||
public void testParseAssignment() {
|
||||
Map<Integer, List<Integer>> actualAssignment = TopicCommand.parseReplicaAssignment("5:4,3:2,1:0");
|
||||
Map<Integer, List<Integer>> expectedAssignment = new HashMap<>();
|
||||
expectedAssignment.put(0, Arrays.asList(5, 4));
|
||||
expectedAssignment.put(1, Arrays.asList(3, 2));
|
||||
expectedAssignment.put(2, Arrays.asList(1, 0));
|
||||
expectedAssignment.put(0, List.of(5, 4));
|
||||
expectedAssignment.put(1, List.of(3, 2));
|
||||
expectedAssignment.put(2, List.of(1, 0));
|
||||
assertEquals(expectedAssignment, actualAssignment);
|
||||
}
|
||||
|
||||
|
@ -257,7 +257,7 @@ public class TopicCommandTest {
|
|||
})));
|
||||
|
||||
NewTopic expectedNewTopic = new NewTopic(topicName, Optional.empty(), Optional.empty())
|
||||
.configs(Collections.emptyMap());
|
||||
.configs(Map.of());
|
||||
|
||||
verify(adminClient, times(1)).createTopics(
|
||||
eq(Set.of(expectedNewTopic)),
|
||||
|
@ -285,7 +285,7 @@ public class TopicCommandTest {
|
|||
assertInstanceOf(ThrottlingQuotaExceededException.class, exception.getCause());
|
||||
|
||||
verify(adminClient).deleteTopics(
|
||||
argThat((Collection<String> topics) -> topics.equals(Arrays.asList(topicName))),
|
||||
argThat((Collection<String> topics) -> topics.equals(List.of(topicName))),
|
||||
argThat((DeleteTopicsOptions options) -> !options.shouldRetryOnQuotaViolation()));
|
||||
}
|
||||
|
||||
|
@ -298,9 +298,9 @@ public class TopicCommandTest {
|
|||
when(adminClient.listTopics(any())).thenReturn(listResult);
|
||||
|
||||
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, new Node(0, "", 0),
|
||||
Collections.emptyList(), Collections.emptyList());
|
||||
List.of(), List.of());
|
||||
DescribeTopicsResult describeResult = AdminClientTestUtils.describeTopicsResult(topicName,
|
||||
new TopicDescription(topicName, false, Collections.singletonList(topicPartitionInfo)));
|
||||
new TopicDescription(topicName, false, List.of(topicPartitionInfo)));
|
||||
when(adminClient.describeTopics(anyCollection())).thenReturn(describeResult);
|
||||
|
||||
CreatePartitionsResult result = AdminClientTestUtils.createPartitionsResult(topicName, Errors.THROTTLING_QUOTA_EXCEEDED.exception());
|
||||
|
@ -365,7 +365,7 @@ public class TopicCommandTest {
|
|||
rackInfo.put(4, infoPerBroker5);
|
||||
rackInfo.put(5, infoPerBroker6);
|
||||
|
||||
return Collections.singletonList(ClusterConfig.defaultBuilder()
|
||||
return List.of(ClusterConfig.defaultBuilder()
|
||||
.setBrokers(6)
|
||||
.setServerProperties(serverProp)
|
||||
.setPerServerProperties(rackInfo)
|
||||
|
@ -385,13 +385,13 @@ public class TopicCommandTest {
|
|||
String testTopicName = TestUtils.randomString(10);
|
||||
|
||||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
Assertions.assertTrue(adminClient.listTopics().names().get().contains(testTopicName),
|
||||
"Admin client didn't see the created topic. It saw: " + adminClient.listTopics().names().get());
|
||||
|
||||
adminClient.deleteTopics(Collections.singletonList(testTopicName));
|
||||
adminClient.deleteTopics(List.of(testTopicName));
|
||||
clusterInstance.waitTopicDeletion(testTopicName);
|
||||
Assertions.assertTrue(adminClient.listTopics().names().get().isEmpty(),
|
||||
"Admin client see the created topic. It saw: " + adminClient.listTopics().names().get());
|
||||
|
@ -409,14 +409,14 @@ public class TopicCommandTest {
|
|||
String testTopicName = TestUtils.randomString(10);
|
||||
|
||||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
Assertions.assertTrue(adminClient.listTopics().names().get().contains(testTopicName),
|
||||
"Admin client didn't see the created topic. It saw: " + adminClient.listTopics().names().get());
|
||||
|
||||
List<TopicPartitionInfo> partitions = adminClient
|
||||
.describeTopics(Collections.singletonList(testTopicName))
|
||||
.describeTopics(List.of(testTopicName))
|
||||
.allTopicNames()
|
||||
.get()
|
||||
.get(testTopicName)
|
||||
|
@ -424,7 +424,7 @@ public class TopicCommandTest {
|
|||
Assertions.assertEquals(defaultNumPartitions, partitions.size(), "Unequal partition size: " + partitions.size());
|
||||
Assertions.assertEquals(defaultReplicationFactor, (short) partitions.get(0).replicas().size(), "Unequal replication factor: " + partitions.get(0).replicas().size());
|
||||
|
||||
adminClient.deleteTopics(Collections.singletonList(testTopicName));
|
||||
adminClient.deleteTopics(List.of(testTopicName));
|
||||
clusterInstance.waitTopicDeletion(testTopicName);
|
||||
Assertions.assertTrue(adminClient.listTopics().names().get().isEmpty(),
|
||||
"Admin client see the created topic. It saw: " + adminClient.listTopics().names().get());
|
||||
|
@ -442,10 +442,10 @@ public class TopicCommandTest {
|
|||
String testTopicName = TestUtils.randomString(10);
|
||||
|
||||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, 2, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, 2, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, 2);
|
||||
List<TopicPartitionInfo> partitions = adminClient
|
||||
.describeTopics(Collections.singletonList(testTopicName))
|
||||
.describeTopics(List.of(testTopicName))
|
||||
.allTopicNames()
|
||||
.get()
|
||||
.get(testTopicName)
|
||||
|
@ -460,10 +460,10 @@ public class TopicCommandTest {
|
|||
String testTopicName = TestUtils.randomString(10);
|
||||
|
||||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, (short) 2)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, (short) 2)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
List<TopicPartitionInfo> partitions = adminClient
|
||||
.describeTopics(Collections.singletonList(testTopicName))
|
||||
.describeTopics(List.of(testTopicName))
|
||||
.allTopicNames()
|
||||
.get()
|
||||
.get(testTopicName)
|
||||
|
@ -483,11 +483,11 @@ public class TopicCommandTest {
|
|||
Map<String, String> topicConfig = new HashMap<>();
|
||||
topicConfig.put(TopicConfig.DELETE_RETENTION_MS_CONFIG, "1000");
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, 2, (short) 2).configs(topicConfig)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, 2, (short) 2).configs(topicConfig)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, 2);
|
||||
|
||||
|
||||
Config configs = adminClient.describeConfigs(Collections.singleton(configResource)).all().get().get(configResource);
|
||||
Config configs = adminClient.describeConfigs(Set.of(configResource)).all().get().get(configResource);
|
||||
assertEquals(1000, Integer.valueOf(configs.get("delete.retention.ms").value()),
|
||||
"Config not set correctly: " + configs.get("delete.retention.ms").value());
|
||||
}
|
||||
|
@ -502,7 +502,7 @@ public class TopicCommandTest {
|
|||
clusterInstance, "--create", "--partitions", Integer.toString(defaultNumPartitions), "--replication-factor", "1",
|
||||
"--topic", testTopicName);
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
// try to re-create the topic
|
||||
|
@ -516,7 +516,7 @@ public class TopicCommandTest {
|
|||
String testTopicName = TestUtils.randomString(10);
|
||||
try (Admin adminClient = clusterInstance.admin();
|
||||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
TopicCommand.TopicCommandOptions createOpts =
|
||||
|
@ -526,7 +526,7 @@ public class TopicCommandTest {
|
|||
}
|
||||
|
||||
private List<Integer> getPartitionReplicas(List<TopicPartitionInfo> partitions, int partitionNumber) {
|
||||
return partitions.get(partitionNumber).replicas().stream().map(Node::id).collect(Collectors.toList());
|
||||
return partitions.get(partitionNumber).replicas().stream().map(Node::id).toList();
|
||||
}
|
||||
|
||||
@ClusterTemplate("generate")
|
||||
|
@ -535,15 +535,15 @@ public class TopicCommandTest {
|
|||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
String testTopicName = TestUtils.randomString(10);
|
||||
|
||||
replicaAssignmentMap.put(0, Arrays.asList(5, 4));
|
||||
replicaAssignmentMap.put(1, Arrays.asList(3, 2));
|
||||
replicaAssignmentMap.put(2, Arrays.asList(1, 0));
|
||||
replicaAssignmentMap.put(0, List.of(5, 4));
|
||||
replicaAssignmentMap.put(1, List.of(3, 2));
|
||||
replicaAssignmentMap.put(2, List.of(1, 0));
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, replicaAssignmentMap)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, replicaAssignmentMap)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, 3);
|
||||
|
||||
List<TopicPartitionInfo> partitions = adminClient
|
||||
.describeTopics(Collections.singletonList(testTopicName))
|
||||
.describeTopics(List.of(testTopicName))
|
||||
.allTopicNames()
|
||||
.get()
|
||||
.get(testTopicName)
|
||||
|
@ -551,11 +551,11 @@ public class TopicCommandTest {
|
|||
|
||||
assertEquals(3, partitions.size(),
|
||||
"Unequal partition size: " + partitions.size());
|
||||
assertEquals(Arrays.asList(5, 4), getPartitionReplicas(partitions, 0),
|
||||
assertEquals(List.of(5, 4), getPartitionReplicas(partitions, 0),
|
||||
"Unexpected replica assignment: " + getPartitionReplicas(partitions, 0));
|
||||
assertEquals(Arrays.asList(3, 2), getPartitionReplicas(partitions, 1),
|
||||
assertEquals(List.of(3, 2), getPartitionReplicas(partitions, 1),
|
||||
"Unexpected replica assignment: " + getPartitionReplicas(partitions, 1));
|
||||
assertEquals(Arrays.asList(1, 0), getPartitionReplicas(partitions, 2),
|
||||
assertEquals(List.of(1, 0), getPartitionReplicas(partitions, 2),
|
||||
"Unexpected replica assignment: " + getPartitionReplicas(partitions, 2));
|
||||
}
|
||||
}
|
||||
|
@ -610,7 +610,7 @@ public class TopicCommandTest {
|
|||
public void testListTopics(ClusterInstance clusterInstance) throws InterruptedException {
|
||||
String testTopicName = TestUtils.randomString(10);
|
||||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
String output = captureListTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--list"));
|
||||
|
@ -626,9 +626,9 @@ public class TopicCommandTest {
|
|||
String topic3 = "oooof.testTopic1";
|
||||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topic1, partition, replicationFactor)));
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topic2, partition, replicationFactor)));
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topic3, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topic1, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topic2, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topic3, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(topic1, partition);
|
||||
clusterInstance.waitTopicCreation(topic2, partition);
|
||||
clusterInstance.waitTopicCreation(topic3, partition);
|
||||
|
@ -647,7 +647,7 @@ public class TopicCommandTest {
|
|||
String hiddenConsumerTopic = Topic.GROUP_METADATA_TOPIC_NAME;
|
||||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topic1, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topic1, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(topic1, partition);
|
||||
|
||||
String output = captureListTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--list", "--exclude-internal"));
|
||||
|
@ -663,7 +663,7 @@ public class TopicCommandTest {
|
|||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partition);
|
||||
topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter", "--topic", testTopicName, "--partitions", "3"));
|
||||
|
||||
|
@ -676,7 +676,7 @@ public class TopicCommandTest {
|
|||
() -> clusterInstance.brokers().values().stream().allMatch(
|
||||
b -> b.metadataCache().numPartitions(testTopicName).orElse(0) == 3),
|
||||
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
|
||||
TopicDescription topicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)).topicNameValues().get(testTopicName).get();
|
||||
TopicDescription topicDescription = adminClient.describeTopics(List.of(testTopicName)).topicNameValues().get(testTopicName).get();
|
||||
assertEquals(3, topicDescription.partitions().size(), "Expected partition count to be 3. Got: " + topicDescription.partitions().size());
|
||||
}
|
||||
}
|
||||
|
@ -689,7 +689,7 @@ public class TopicCommandTest {
|
|||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partition);
|
||||
|
||||
topicService.alterTopic(buildTopicCommandOptionsWithBootstrap(clusterInstance, "--alter",
|
||||
|
@ -705,10 +705,10 @@ public class TopicCommandTest {
|
|||
b -> b.metadataCache().numPartitions(testTopicName).orElse(0) == 3),
|
||||
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
|
||||
|
||||
TopicDescription topicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)).topicNameValues().get(testTopicName).get();
|
||||
TopicDescription topicDescription = adminClient.describeTopics(List.of(testTopicName)).topicNameValues().get(testTopicName).get();
|
||||
assertEquals(3, topicDescription.partitions().size(), "Expected partition count to be 3. Got: " + topicDescription.partitions().size());
|
||||
List<Integer> partitionReplicas = getPartitionReplicas(topicDescription.partitions(), 2);
|
||||
assertEquals(Arrays.asList(4, 2), partitionReplicas, "Expected to have replicas 4,2. Got: " + partitionReplicas);
|
||||
assertEquals(List.of(4, 2), partitionReplicas, "Expected to have replicas 4,2. Got: " + partitionReplicas);
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -721,7 +721,7 @@ public class TopicCommandTest {
|
|||
|
||||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partition);
|
||||
|
||||
assertThrows(ExecutionException.class,
|
||||
|
@ -739,7 +739,7 @@ public class TopicCommandTest {
|
|||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partition);
|
||||
|
||||
assertThrows(ExecutionException.class,
|
||||
|
@ -756,7 +756,7 @@ public class TopicCommandTest {
|
|||
|
||||
try (Admin adminClient = clusterInstance.admin();
|
||||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
assertThrows(ExecutionException.class,
|
||||
|
@ -805,15 +805,15 @@ public class TopicCommandTest {
|
|||
|
||||
int numPartitions = 18;
|
||||
int replicationFactor = 3;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, numPartitions, (short) replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, numPartitions, (short) replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, numPartitions);
|
||||
|
||||
Map<Integer, List<Integer>> assignment = adminClient.describeTopics(Collections.singletonList(testTopicName))
|
||||
Map<Integer, List<Integer>> assignment = adminClient.describeTopics(List.of(testTopicName))
|
||||
.allTopicNames().get().get(testTopicName).partitions()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(
|
||||
info -> info.partition(),
|
||||
info -> info.replicas().stream().map(Node::id).collect(Collectors.toList())));
|
||||
TopicPartitionInfo::partition,
|
||||
info -> info.replicas().stream().map(Node::id).toList()));
|
||||
checkReplicaDistribution(assignment, rackInfo, rackInfo.size(), numPartitions,
|
||||
replicationFactor, true, true, true);
|
||||
|
||||
|
@ -832,9 +832,9 @@ public class TopicCommandTest {
|
|||
() -> clusterInstance.brokers().values().stream().allMatch(p -> p.metadataCache().numPartitions(testTopicName).orElse(0) == alteredNumPartitions),
|
||||
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
|
||||
|
||||
assignment = adminClient.describeTopics(Collections.singletonList(testTopicName))
|
||||
assignment = adminClient.describeTopics(List.of(testTopicName))
|
||||
.allTopicNames().get().get(testTopicName).partitions().stream()
|
||||
.collect(Collectors.toMap(info -> info.partition(), info -> info.replicas().stream().map(Node::id).collect(Collectors.toList())));
|
||||
.collect(Collectors.toMap(TopicPartitionInfo::partition, info -> info.replicas().stream().map(Node::id).toList()));
|
||||
checkReplicaDistribution(assignment, rackInfo, rackInfo.size(), alteredNumPartitions, replicationFactor,
|
||||
true, true, true);
|
||||
|
||||
|
@ -850,11 +850,11 @@ public class TopicCommandTest {
|
|||
String cleanUpPolicy = "compact";
|
||||
HashMap<String, String> topicConfig = new HashMap<>();
|
||||
topicConfig.put(TopicConfig.CLEANUP_POLICY_CONFIG, cleanUpPolicy);
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor).configs(topicConfig)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor).configs(topicConfig)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
|
||||
Config props = adminClient.describeConfigs(Collections.singleton(configResource)).all().get().get(configResource);
|
||||
Config props = adminClient.describeConfigs(Set.of(configResource)).all().get().get(configResource);
|
||||
assertNotNull(props.get(TopicConfig.CLEANUP_POLICY_CONFIG), "Properties after creation don't contain " + cleanUpPolicy);
|
||||
assertEquals(cleanUpPolicy, props.get(TopicConfig.CLEANUP_POLICY_CONFIG).value(), "Properties after creation have incorrect value");
|
||||
|
||||
|
@ -868,7 +868,7 @@ public class TopicCommandTest {
|
|||
() -> clusterInstance.brokers().values().stream().allMatch(p -> p.metadataCache().numPartitions(testTopicName).orElse(0) == numPartitionsModified),
|
||||
TestUtils.DEFAULT_MAX_WAIT_MS, "Timeout waiting for new assignment propagating to broker");
|
||||
|
||||
Config newProps = adminClient.describeConfigs(Collections.singleton(configResource)).all().get().get(configResource);
|
||||
Config newProps = adminClient.describeConfigs(Set.of(configResource)).all().get().get(configResource);
|
||||
assertNotNull(newProps.get(TopicConfig.CLEANUP_POLICY_CONFIG), "Updated properties do not contain " + TopicConfig.CLEANUP_POLICY_CONFIG);
|
||||
assertEquals(cleanUpPolicy, newProps.get(TopicConfig.CLEANUP_POLICY_CONFIG).value(), "Updated properties have incorrect value");
|
||||
|
||||
|
@ -887,7 +887,7 @@ public class TopicCommandTest {
|
|||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
String testTopicName = TestUtils.randomString(10);
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
// delete the NormalTopic
|
||||
|
@ -913,7 +913,7 @@ public class TopicCommandTest {
|
|||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
// create the topic with colliding chars
|
||||
String topicWithCollidingChar = "test.a";
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topicWithCollidingChar, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topicWithCollidingChar, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(topicWithCollidingChar, defaultNumPartitions);
|
||||
|
||||
// delete the topic
|
||||
|
@ -927,7 +927,7 @@ public class TopicCommandTest {
|
|||
clusterInstance.waitTopicDeletion(topicWithCollidingChar);
|
||||
|
||||
// recreate same topic
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topicWithCollidingChar, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topicWithCollidingChar, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(topicWithCollidingChar, defaultNumPartitions);
|
||||
}
|
||||
}
|
||||
|
@ -944,7 +944,7 @@ public class TopicCommandTest {
|
|||
TopicCommand.TopicService topicService = new TopicCommand.TopicService(adminClient)) {
|
||||
|
||||
// create the offset topic
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(Topic.GROUP_METADATA_TOPIC_NAME, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(Topic.GROUP_METADATA_TOPIC_NAME, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(Topic.GROUP_METADATA_TOPIC_NAME, defaultNumPartitions);
|
||||
|
||||
// Try to delete the Topic.GROUP_METADATA_TOPIC_NAME which is allowed by default.
|
||||
|
@ -1001,7 +1001,7 @@ public class TopicCommandTest {
|
|||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
int partition = 2;
|
||||
short replicationFactor = 2;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partition, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partition);
|
||||
|
||||
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName));
|
||||
|
@ -1075,7 +1075,7 @@ public class TopicCommandTest {
|
|||
int partitions = 3;
|
||||
short replicationFactor = 1;
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partitions, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partitions);
|
||||
|
||||
// check which partition is on broker 0 which we'll kill
|
||||
|
@ -1102,11 +1102,11 @@ public class TopicCommandTest {
|
|||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
int partitions = 1;
|
||||
short replicationFactor = 3;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partitions, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partitions);
|
||||
|
||||
clusterInstance.shutdownBroker(0);
|
||||
Assertions.assertEquals(clusterInstance.aliveBrokers().size(), 2);
|
||||
Assertions.assertEquals(2, clusterInstance.aliveBrokers().size());
|
||||
|
||||
TestUtils.waitForCondition(
|
||||
() -> clusterInstance.aliveBrokers().values().stream().allMatch(
|
||||
|
@ -1133,7 +1133,7 @@ public class TopicCommandTest {
|
|||
topicConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3");
|
||||
int partitions = 1;
|
||||
short replicationFactor = 3;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partitions);
|
||||
|
||||
clusterInstance.shutdownBroker(0);
|
||||
|
@ -1157,7 +1157,7 @@ public class TopicCommandTest {
|
|||
|
||||
try (Admin adminClient = clusterInstance.admin();
|
||||
KafkaProducer<String, String> producer = createProducer(clusterInstance)) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
TopicPartition tp = new TopicPartition(testTopicName, 0);
|
||||
|
@ -1170,22 +1170,22 @@ public class TopicCommandTest {
|
|||
// throughput so the reassignment doesn't complete quickly.
|
||||
List<Integer> brokerIds = new ArrayList<>(clusterInstance.brokerIds());
|
||||
|
||||
ToolsTestUtils.setReplicationThrottleForPartitions(adminClient, brokerIds, Collections.singleton(tp), 1);
|
||||
ToolsTestUtils.setReplicationThrottleForPartitions(adminClient, brokerIds, Set.of(tp), 1);
|
||||
|
||||
TopicDescription testTopicDesc = adminClient.describeTopics(Collections.singleton(testTopicName)).allTopicNames().get().get(testTopicName);
|
||||
TopicDescription testTopicDesc = adminClient.describeTopics(Set.of(testTopicName)).allTopicNames().get().get(testTopicName);
|
||||
TopicPartitionInfo firstPartition = testTopicDesc.partitions().get(0);
|
||||
|
||||
List<Integer> replicasOfFirstPartition = firstPartition.replicas().stream().map(Node::id).collect(Collectors.toList());
|
||||
List<Integer> replicasOfFirstPartition = firstPartition.replicas().stream().map(Node::id).toList();
|
||||
List<Integer> replicasDiff = new ArrayList<>(brokerIds);
|
||||
replicasDiff.removeAll(replicasOfFirstPartition);
|
||||
Integer targetReplica = replicasDiff.get(0);
|
||||
|
||||
adminClient.alterPartitionReassignments(Collections.singletonMap(tp,
|
||||
Optional.of(new NewPartitionReassignment(Collections.singletonList(targetReplica))))).all().get();
|
||||
adminClient.alterPartitionReassignments(Map.of(tp,
|
||||
Optional.of(new NewPartitionReassignment(List.of(targetReplica))))).all().get();
|
||||
|
||||
// let's wait until the LAIR is propagated
|
||||
TestUtils.waitForCondition(
|
||||
() -> !adminClient.listPartitionReassignments(Collections.singleton(tp)).reassignments().get()
|
||||
() -> !adminClient.listPartitionReassignments(Set.of(tp)).reassignments().get()
|
||||
.get(tp).addingReplicas().isEmpty(), CLUSTER_WAIT_MS, "Reassignment didn't add the second node"
|
||||
);
|
||||
|
||||
|
@ -1208,7 +1208,7 @@ public class TopicCommandTest {
|
|||
|
||||
TestUtils.waitForCondition(
|
||||
() -> {
|
||||
PartitionReassignment tempReassignments = adminClient.listPartitionReassignments(Collections.singleton(tp)).reassignments().get().get(tp);
|
||||
PartitionReassignment tempReassignments = adminClient.listPartitionReassignments(Set.of(tp)).reassignments().get().get(tp);
|
||||
reassignmentsRef.set(tempReassignments);
|
||||
return reassignmentsRef.get() != null;
|
||||
}, waitTimeMs, "Reassignments did not become non-null within the specified time"
|
||||
|
@ -1216,7 +1216,7 @@ public class TopicCommandTest {
|
|||
|
||||
assertFalse(reassignmentsRef.get().addingReplicas().isEmpty());
|
||||
|
||||
ToolsTestUtils.removeReplicationThrottleForPartitions(adminClient, brokerIds, Collections.singleton(tp));
|
||||
ToolsTestUtils.removeReplicationThrottleForPartitions(adminClient, brokerIds, Set.of(tp));
|
||||
TestUtils.waitForCondition(
|
||||
() -> adminClient.listPartitionReassignments().reassignments().get().isEmpty(),
|
||||
CLUSTER_WAIT_MS, String.format("reassignmet not finished after %s ms", CLUSTER_WAIT_MS)
|
||||
|
@ -1235,7 +1235,7 @@ public class TopicCommandTest {
|
|||
int partitions = 1;
|
||||
short replicationFactor = 6;
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partitions);
|
||||
|
||||
clusterInstance.shutdownBroker(0);
|
||||
|
@ -1278,10 +1278,10 @@ public class TopicCommandTest {
|
|||
List<NewTopic> newTopics = new ArrayList<>();
|
||||
|
||||
Map<Integer, List<Integer>> fullyReplicatedReplicaAssignmentMap = new HashMap<>();
|
||||
fullyReplicatedReplicaAssignmentMap.put(0, Arrays.asList(1, 2, 3));
|
||||
fullyReplicatedReplicaAssignmentMap.put(0, List.of(1, 2, 3));
|
||||
|
||||
Map<Integer, List<Integer>> offlineReplicaAssignmentMap = new HashMap<>();
|
||||
offlineReplicaAssignmentMap.put(0, Arrays.asList(0));
|
||||
offlineReplicaAssignmentMap.put(0, List.of(0));
|
||||
|
||||
Map<String, String> topicConfig = new HashMap<>();
|
||||
topicConfig.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "6");
|
||||
|
@ -1333,7 +1333,7 @@ public class TopicCommandTest {
|
|||
int partitions = 2;
|
||||
short replicationFactor = 2;
|
||||
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, partitions, replicationFactor).configs(topicConfig)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, partitions);
|
||||
|
||||
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe"));
|
||||
|
@ -1345,7 +1345,7 @@ public class TopicCommandTest {
|
|||
public void testDescribeAndListTopicsWithoutInternalTopics(ClusterInstance clusterInstance) throws InterruptedException {
|
||||
String testTopicName = TestUtils.randomString(10);
|
||||
try (Admin adminClient = clusterInstance.admin()) {
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
// test describe
|
||||
|
@ -1374,9 +1374,9 @@ public class TopicCommandTest {
|
|||
new ClusterAuthorizationException("Unauthorized"));
|
||||
|
||||
doReturn(result).when(adminClient).listPartitionReassignments(
|
||||
Collections.singleton(new TopicPartition(testTopicName, 0))
|
||||
Set.of(new TopicPartition(testTopicName, 0))
|
||||
);
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(testTopicName, defaultNumPartitions, defaultReplicationFactor)));
|
||||
clusterInstance.waitTopicCreation(testTopicName, defaultNumPartitions);
|
||||
|
||||
String output = captureDescribeTopicStandardOut(clusterInstance, buildTopicCommandOptionsWithBootstrap(clusterInstance, "--describe", "--topic", testTopicName));
|
||||
|
@ -1395,7 +1395,7 @@ public class TopicCommandTest {
|
|||
String topic = "foo_bar";
|
||||
int partitions = 1;
|
||||
short replicationFactor = 3;
|
||||
adminClient.createTopics(Collections.singletonList(new NewTopic(topic, partitions, replicationFactor)));
|
||||
adminClient.createTopics(List.of(new NewTopic(topic, partitions, replicationFactor)));
|
||||
clusterInstance.waitTopicCreation(topic, defaultNumPartitions);
|
||||
|
||||
assertThrows(TopicExistsException.class,
|
||||
|
@ -1444,7 +1444,7 @@ public class TopicCommandTest {
|
|||
|
||||
List<Integer> partitionRackMapValueSize = partitionRackMap.values().stream()
|
||||
.map(value -> (int) value.stream().distinct().count())
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
|
||||
List<Integer> expected = Collections.nCopies(numPartitions, replicationFactor);
|
||||
assertEquals(expected, partitionRackMapValueSize, "More than one replica of the same partition is assigned to the same rack");
|
||||
|
@ -1502,9 +1502,9 @@ public class TopicCommandTest {
|
|||
String rack;
|
||||
if (brokerRackMapping.containsKey(brokerId)) {
|
||||
rack = brokerRackMapping.get(brokerId);
|
||||
List<String> partitionRackValues = Stream.of(Collections.singletonList(rack), partitionRackMap.getOrDefault(partitionId, Collections.emptyList()))
|
||||
List<String> partitionRackValues = Stream.of(List.of(rack), partitionRackMap.getOrDefault(partitionId, List.of()))
|
||||
.flatMap(List::stream)
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
partitionRackMap.put(partitionId, partitionRackValues);
|
||||
} else {
|
||||
System.err.printf("No mapping found for %s in `brokerRackMapping`%n", brokerId);
|
||||
|
@ -1514,18 +1514,9 @@ public class TopicCommandTest {
|
|||
return new ReplicaDistributions(partitionRackMap, leaderCount, partitionCount);
|
||||
}
|
||||
|
||||
private static class ReplicaDistributions {
|
||||
private final Map<Integer, List<String>> partitionRacks;
|
||||
private final Map<Integer, Integer> brokerLeaderCount;
|
||||
private final Map<Integer, Integer> brokerReplicasCount;
|
||||
|
||||
public ReplicaDistributions(Map<Integer, List<String>> partitionRacks,
|
||||
private record ReplicaDistributions(Map<Integer, List<String>> partitionRacks,
|
||||
Map<Integer, Integer> brokerLeaderCount,
|
||||
Map<Integer, Integer> brokerReplicasCount) {
|
||||
this.partitionRacks = partitionRacks;
|
||||
this.brokerLeaderCount = brokerLeaderCount;
|
||||
this.brokerReplicasCount = brokerReplicasCount;
|
||||
}
|
||||
}
|
||||
|
||||
private KafkaProducer<String, String> createProducer(ClusterInstance clusterInstance) {
|
||||
|
|
|
@ -57,9 +57,7 @@ import java.io.IOException;
|
|||
import java.io.InputStreamReader;
|
||||
import java.io.PrintStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -69,11 +67,6 @@ import java.util.OptionalLong;
|
|||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singleton;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.apache.kafka.common.KafkaFuture.completedFuture;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
|
@ -161,7 +154,7 @@ public class TransactionsCommandTest {
|
|||
) throws Exception {
|
||||
DescribeProducersResult describeResult = Mockito.mock(DescribeProducersResult.class);
|
||||
KafkaFuture<PartitionProducerState> describeFuture = completedFuture(
|
||||
new PartitionProducerState(asList(
|
||||
new PartitionProducerState(List.of(
|
||||
new ProducerState(12345L, 15, 1300, 1599509565L,
|
||||
OptionalInt.of(20), OptionalLong.of(990)),
|
||||
new ProducerState(98765L, 30, 2300, 1599509599L,
|
||||
|
@ -170,7 +163,7 @@ public class TransactionsCommandTest {
|
|||
|
||||
|
||||
Mockito.when(describeResult.partitionResult(topicPartition)).thenReturn(describeFuture);
|
||||
Mockito.when(admin.describeProducers(singleton(topicPartition), expectedOptions)).thenReturn(describeResult);
|
||||
Mockito.when(admin.describeProducers(Set.of(topicPartition), expectedOptions)).thenReturn(describeResult);
|
||||
|
||||
execute(args);
|
||||
assertNormalExit();
|
||||
|
@ -182,8 +175,8 @@ public class TransactionsCommandTest {
|
|||
assertEquals(expectedHeaders, table.get(0));
|
||||
|
||||
Set<List<String>> expectedRows = Set.of(
|
||||
asList("12345", "15", "20", "1300", "1599509565", "990"),
|
||||
asList("98765", "30", "-1", "2300", "1599509599", "None")
|
||||
List.of("12345", "15", "20", "1300", "1599509565", "990"),
|
||||
List.of("98765", "30", "-1", "2300", "1599509599", "None")
|
||||
);
|
||||
assertEquals(expectedRows, new HashSet<>(table.subList(1, table.size())));
|
||||
}
|
||||
|
@ -233,9 +226,9 @@ public class TransactionsCommandTest {
|
|||
assertEquals(expectedHeaders, table.get(0));
|
||||
|
||||
Set<List<String>> expectedRows = Set.of(
|
||||
asList("foo", "0", "12345", "Ongoing"),
|
||||
asList("bar", "0", "98765", "PrepareAbort"),
|
||||
asList("baz", "1", "13579", "CompleteCommit")
|
||||
List.of("foo", "0", "12345", "Ongoing"),
|
||||
List.of("bar", "0", "98765", "PrepareAbort"),
|
||||
List.of("baz", "1", "13579", "CompleteCommit")
|
||||
);
|
||||
assertEquals(expectedRows, new HashSet<>(table.subList(1, table.size())));
|
||||
}
|
||||
|
@ -302,11 +295,11 @@ public class TransactionsCommandTest {
|
|||
15,
|
||||
10000,
|
||||
OptionalLong.of(transactionStartTime),
|
||||
singleton(new TopicPartition("bar", 0))
|
||||
Set.of(new TopicPartition("bar", 0))
|
||||
));
|
||||
|
||||
Mockito.when(describeResult.description(transactionalId)).thenReturn(describeFuture);
|
||||
Mockito.when(admin.describeTransactions(singleton(transactionalId))).thenReturn(describeResult);
|
||||
Mockito.when(admin.describeTransactions(Set.of(transactionalId))).thenReturn(describeResult);
|
||||
|
||||
// Add a little time so that we can see a positive transaction duration in the output
|
||||
time.sleep(5000);
|
||||
|
@ -320,7 +313,7 @@ public class TransactionsCommandTest {
|
|||
List<String> expectedHeaders = TransactionsCommand.DescribeTransactionsCommand.HEADERS;
|
||||
assertEquals(expectedHeaders, table.get(0));
|
||||
|
||||
List<String> expectedRow = asList(
|
||||
List<String> expectedRow = List.of(
|
||||
String.valueOf(coordinatorId),
|
||||
transactionalId,
|
||||
"12345",
|
||||
|
@ -463,7 +456,7 @@ public class TransactionsCommandTest {
|
|||
|
||||
DescribeProducersResult describeResult = Mockito.mock(DescribeProducersResult.class);
|
||||
KafkaFuture<PartitionProducerState> describeFuture = completedFuture(
|
||||
new PartitionProducerState(singletonList(
|
||||
new PartitionProducerState(List.of(
|
||||
new ProducerState(producerId, producerEpoch, 1300, 1599509565L,
|
||||
OptionalInt.of(coordinatorEpoch), OptionalLong.of(startOffset))
|
||||
)));
|
||||
|
@ -474,7 +467,7 @@ public class TransactionsCommandTest {
|
|||
topicPartition, producerId, producerEpoch, coordinatorEpoch);
|
||||
|
||||
Mockito.when(describeResult.partitionResult(topicPartition)).thenReturn(describeFuture);
|
||||
Mockito.when(admin.describeProducers(singleton(topicPartition))).thenReturn(describeResult);
|
||||
Mockito.when(admin.describeProducers(Set.of(topicPartition))).thenReturn(describeResult);
|
||||
|
||||
Mockito.when(abortTransactionResult.all()).thenReturn(abortFuture);
|
||||
Mockito.when(admin.abortTransaction(expectedAbortSpec)).thenReturn(abortTransactionResult);
|
||||
|
@ -570,7 +563,7 @@ public class TransactionsCommandTest {
|
|||
OptionalInt coordinatorEpoch,
|
||||
OptionalLong txnStartOffset
|
||||
) {
|
||||
PartitionProducerState partitionProducerState = new PartitionProducerState(singletonList(
|
||||
PartitionProducerState partitionProducerState = new PartitionProducerState(List.of(
|
||||
new ProducerState(
|
||||
producerId,
|
||||
producerEpoch,
|
||||
|
@ -583,11 +576,11 @@ public class TransactionsCommandTest {
|
|||
|
||||
DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
|
||||
Mockito.when(result.all()).thenReturn(
|
||||
completedFuture(singletonMap(topicPartition, partitionProducerState))
|
||||
completedFuture(Map.of(topicPartition, partitionProducerState))
|
||||
);
|
||||
|
||||
Mockito.when(admin.describeProducers(
|
||||
Collections.singletonList(topicPartition),
|
||||
List.of(topicPartition),
|
||||
new DescribeProducersOptions()
|
||||
)).thenReturn(result);
|
||||
}
|
||||
|
@ -596,10 +589,10 @@ public class TransactionsCommandTest {
|
|||
Map<String, TransactionDescription> descriptions
|
||||
) {
|
||||
DescribeTransactionsResult result = Mockito.mock(DescribeTransactionsResult.class);
|
||||
descriptions.forEach((transactionalId, description) -> {
|
||||
descriptions.forEach((transactionalId, description) ->
|
||||
Mockito.when(result.description(transactionalId))
|
||||
.thenReturn(completedFuture(description));
|
||||
});
|
||||
.thenReturn(completedFuture(description))
|
||||
);
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(descriptions));
|
||||
Mockito.when(admin.describeTransactions(descriptions.keySet())).thenReturn(result);
|
||||
}
|
||||
|
@ -634,7 +627,7 @@ public class TransactionsCommandTest {
|
|||
};
|
||||
|
||||
String topic = "foo";
|
||||
expectListTopics(singleton(topic));
|
||||
expectListTopics(Set.of(topic));
|
||||
|
||||
Node node0 = new Node(0, "localhost", 9092);
|
||||
Node node1 = new Node(1, "localhost", 9093);
|
||||
|
@ -643,28 +636,28 @@ public class TransactionsCommandTest {
|
|||
TopicPartitionInfo partition0 = new TopicPartitionInfo(
|
||||
0,
|
||||
node0,
|
||||
Arrays.asList(node0, node1),
|
||||
Arrays.asList(node0, node1)
|
||||
List.of(node0, node1),
|
||||
List.of(node0, node1)
|
||||
);
|
||||
TopicPartitionInfo partition1 = new TopicPartitionInfo(
|
||||
1,
|
||||
node1,
|
||||
Arrays.asList(node1, node5),
|
||||
Arrays.asList(node1, node5)
|
||||
List.of(node1, node5),
|
||||
List.of(node1, node5)
|
||||
);
|
||||
|
||||
TopicDescription description = new TopicDescription(
|
||||
topic,
|
||||
false,
|
||||
Arrays.asList(partition0, partition1)
|
||||
List.of(partition0, partition1)
|
||||
);
|
||||
expectDescribeTopics(singletonMap(topic, description));
|
||||
expectDescribeTopics(Map.of(topic, description));
|
||||
|
||||
DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(Map.of()));
|
||||
|
||||
Mockito.when(admin.describeProducers(
|
||||
Collections.singletonList(new TopicPartition(topic, 1)),
|
||||
List.of(new TopicPartition(topic, 1)),
|
||||
new DescribeProducersOptions().brokerId(brokerId)
|
||||
)).thenReturn(result);
|
||||
|
||||
|
@ -695,28 +688,28 @@ public class TransactionsCommandTest {
|
|||
TopicPartitionInfo partition0 = new TopicPartitionInfo(
|
||||
0,
|
||||
node0,
|
||||
Arrays.asList(node0, node1),
|
||||
Arrays.asList(node0, node1)
|
||||
List.of(node0, node1),
|
||||
List.of(node0, node1)
|
||||
);
|
||||
TopicPartitionInfo partition1 = new TopicPartitionInfo(
|
||||
1,
|
||||
node1,
|
||||
Arrays.asList(node1, node5),
|
||||
Arrays.asList(node1, node5)
|
||||
List.of(node1, node5),
|
||||
List.of(node1, node5)
|
||||
);
|
||||
|
||||
TopicDescription description = new TopicDescription(
|
||||
topic,
|
||||
false,
|
||||
Arrays.asList(partition0, partition1)
|
||||
List.of(partition0, partition1)
|
||||
);
|
||||
expectDescribeTopics(singletonMap(topic, description));
|
||||
expectDescribeTopics(Map.of(topic, description));
|
||||
|
||||
DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(Map.of()));
|
||||
|
||||
Mockito.when(admin.describeProducers(
|
||||
Collections.singletonList(new TopicPartition(topic, 1)),
|
||||
List.of(new TopicPartition(topic, 1)),
|
||||
new DescribeProducersOptions().brokerId(brokerId)
|
||||
)).thenReturn(result);
|
||||
|
||||
|
@ -744,28 +737,28 @@ public class TransactionsCommandTest {
|
|||
TopicPartitionInfo partition0 = new TopicPartitionInfo(
|
||||
0,
|
||||
node0,
|
||||
Arrays.asList(node0, node1),
|
||||
Arrays.asList(node0, node1)
|
||||
List.of(node0, node1),
|
||||
List.of(node0, node1)
|
||||
);
|
||||
TopicPartitionInfo partition1 = new TopicPartitionInfo(
|
||||
1,
|
||||
node1,
|
||||
Arrays.asList(node1, node5),
|
||||
Arrays.asList(node1, node5)
|
||||
List.of(node1, node5),
|
||||
List.of(node1, node5)
|
||||
);
|
||||
|
||||
TopicDescription description = new TopicDescription(
|
||||
topic,
|
||||
false,
|
||||
Arrays.asList(partition0, partition1)
|
||||
List.of(partition0, partition1)
|
||||
);
|
||||
expectDescribeTopics(singletonMap(topic, description));
|
||||
expectDescribeTopics(Map.of(topic, description));
|
||||
|
||||
DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
|
||||
Mockito.when(result.all()).thenReturn(completedFuture(Map.of()));
|
||||
|
||||
Mockito.when(admin.describeProducers(
|
||||
Arrays.asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1)),
|
||||
List.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1)),
|
||||
new DescribeProducersOptions()
|
||||
)).thenReturn(result);
|
||||
|
||||
|
@ -851,11 +844,11 @@ public class TransactionsCommandTest {
|
|||
);
|
||||
|
||||
expectListTransactions(
|
||||
new ListTransactionsOptions().filterProducerIds(singleton(producerId)),
|
||||
singletonMap(1, Collections.emptyList())
|
||||
new ListTransactionsOptions().filterProducerIds(Set.of(producerId)),
|
||||
Map.of(1, List.of())
|
||||
);
|
||||
|
||||
expectDescribeTransactions(Collections.emptyMap());
|
||||
expectDescribeTransactions(Map.of());
|
||||
|
||||
execute(args);
|
||||
assertNormalExit();
|
||||
|
@ -907,14 +900,14 @@ public class TransactionsCommandTest {
|
|||
);
|
||||
|
||||
expectListTransactions(
|
||||
new ListTransactionsOptions().filterProducerIds(singleton(producerId)),
|
||||
singletonMap(1, Collections.singletonList(listing))
|
||||
new ListTransactionsOptions().filterProducerIds(Set.of(producerId)),
|
||||
Map.of(1, List.of(listing))
|
||||
);
|
||||
|
||||
DescribeTransactionsResult result = Mockito.mock(DescribeTransactionsResult.class);
|
||||
Mockito.when(result.description(transactionalId))
|
||||
.thenReturn(failedFuture(new TransactionalIdNotFoundException(transactionalId + " not found")));
|
||||
Mockito.when(admin.describeTransactions(singleton(transactionalId))).thenReturn(result);
|
||||
Mockito.when(admin.describeTransactions(Set.of(transactionalId))).thenReturn(result);
|
||||
|
||||
execute(args);
|
||||
assertNormalExit();
|
||||
|
@ -972,8 +965,8 @@ public class TransactionsCommandTest {
|
|||
);
|
||||
|
||||
expectListTransactions(
|
||||
new ListTransactionsOptions().filterProducerIds(singleton(producerId)),
|
||||
singletonMap(1, Collections.singletonList(listing))
|
||||
new ListTransactionsOptions().filterProducerIds(Set.of(producerId)),
|
||||
Map.of(1, List.of(listing))
|
||||
);
|
||||
|
||||
// Although there is a transaction in progress from the same
|
||||
|
@ -986,10 +979,10 @@ public class TransactionsCommandTest {
|
|||
producerEpoch,
|
||||
60000,
|
||||
OptionalLong.of(time.milliseconds()),
|
||||
singleton(new TopicPartition("foo", 10))
|
||||
Set.of(new TopicPartition("foo", 10))
|
||||
);
|
||||
|
||||
expectDescribeTransactions(singletonMap(transactionalId, description));
|
||||
expectDescribeTransactions(Map.of(transactionalId, description));
|
||||
|
||||
execute(args);
|
||||
assertNormalExit();
|
||||
|
@ -1020,7 +1013,7 @@ public class TransactionsCommandTest {
|
|||
|
||||
long durationMinutes = TimeUnit.MILLISECONDS.toMinutes(time.milliseconds() - lastTimestamp);
|
||||
|
||||
List<String> expectedRow = asList(
|
||||
List<String> expectedRow = List.of(
|
||||
topicPartition.topic(),
|
||||
String.valueOf(topicPartition.partition()),
|
||||
String.valueOf(producerId),
|
||||
|
@ -1070,8 +1063,8 @@ public class TransactionsCommandTest {
|
|||
);
|
||||
|
||||
expectListTransactions(
|
||||
new ListTransactionsOptions().filterProducerIds(singleton(producerId)),
|
||||
singletonMap(1, Collections.singletonList(listing))
|
||||
new ListTransactionsOptions().filterProducerIds(Set.of(producerId)),
|
||||
Map.of(1, List.of(listing))
|
||||
);
|
||||
|
||||
// The coordinator shows an active transaction with the same epoch
|
||||
|
@ -1084,10 +1077,10 @@ public class TransactionsCommandTest {
|
|||
producerEpoch,
|
||||
60000,
|
||||
OptionalLong.of(lastTimestamp),
|
||||
singleton(topicPartition)
|
||||
Set.of(topicPartition)
|
||||
);
|
||||
|
||||
expectDescribeTransactions(singletonMap(transactionalId, description));
|
||||
expectDescribeTransactions(Map.of(transactionalId, description));
|
||||
|
||||
execute(args);
|
||||
assertNormalExit();
|
||||
|
@ -1118,7 +1111,7 @@ public class TransactionsCommandTest {
|
|||
if (line == null) {
|
||||
return null;
|
||||
} else {
|
||||
return asList(line.split("\\s+"));
|
||||
return List.of(line.split("\\s+"));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.apache.kafka.test.NoRetryException;
|
|||
import org.apache.kafka.test.TestUtils;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalInt;
|
||||
|
@ -68,8 +67,8 @@ public class UserScramCredentialsCommandTest {
|
|||
throw new RuntimeException();
|
||||
});
|
||||
|
||||
List<String> commandArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", cluster.bootstrapServers()));
|
||||
commandArgs.addAll(Arrays.asList(args));
|
||||
List<String> commandArgs = new ArrayList<>(List.of("--bootstrap-server", cluster.bootstrapServers()));
|
||||
commandArgs.addAll(List.of(args));
|
||||
try {
|
||||
String output = captureStandardOut(() -> ConfigCommand.main(commandArgs.toArray(new String[0])));
|
||||
return new ConfigCommandResult(output);
|
||||
|
@ -162,7 +161,7 @@ public class UserScramCredentialsCommandTest {
|
|||
() -> {
|
||||
try {
|
||||
String output = runConfigCommandViaBroker("--entity-type", "users", "--describe").stdout;
|
||||
return Arrays.asList(msgs).contains(output);
|
||||
return List.of(msgs).contains(output);
|
||||
} catch (Exception e) {
|
||||
throw new NoRetryException(e);
|
||||
}
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.junit.jupiter.api.Test;
|
|||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
|
@ -283,7 +282,7 @@ public class ConsoleConsumerOptionsTest {
|
|||
});
|
||||
|
||||
// different in all three places
|
||||
File propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "group-from-file"));
|
||||
File propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
|
||||
final String[] args = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
@ -295,7 +294,7 @@ public class ConsoleConsumerOptionsTest {
|
|||
assertThrows(IllegalArgumentException.class, () -> new ConsoleConsumerOptions(args));
|
||||
|
||||
// the same in all three places
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "test-group"));
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "test-group"));
|
||||
final String[] args1 = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
@ -309,7 +308,7 @@ public class ConsoleConsumerOptionsTest {
|
|||
assertEquals("test-group", props.getProperty("group.id"));
|
||||
|
||||
// different via --consumer-property and --consumer.config
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "group-from-file"));
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
|
||||
final String[] args2 = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
@ -330,7 +329,7 @@ public class ConsoleConsumerOptionsTest {
|
|||
assertThrows(IllegalArgumentException.class, () -> new ConsoleConsumerOptions(args3));
|
||||
|
||||
// different via --group and --consumer.config
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "group-from-file"));
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
|
||||
final String[] args4 = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
|
|
@ -61,14 +61,13 @@ import java.io.ByteArrayOutputStream;
|
|||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
import static org.apache.kafka.clients.CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG;
|
||||
|
@ -155,7 +154,7 @@ public class ConsoleConsumerTest {
|
|||
mockConsumer
|
||||
);
|
||||
|
||||
mockConsumer.rebalance(Arrays.asList(tp1, tp2));
|
||||
mockConsumer.rebalance(List.of(tp1, tp2));
|
||||
Map<TopicPartition, Long> offsets = new HashMap<>();
|
||||
offsets.put(tp1, startOffset);
|
||||
offsets.put(tp2, startOffset);
|
||||
|
@ -234,8 +233,8 @@ public class ConsoleConsumerTest {
|
|||
mockConsumer
|
||||
);
|
||||
|
||||
verify(mockConsumer).assign(eq(Collections.singletonList(tp0)));
|
||||
verify(mockConsumer).seekToEnd(eq(Collections.singletonList(tp0)));
|
||||
verify(mockConsumer).assign(eq(List.of(tp0)));
|
||||
verify(mockConsumer).seekToEnd(eq(List.of(tp0)));
|
||||
consumer.cleanup();
|
||||
reset(mockConsumer);
|
||||
|
||||
|
@ -249,7 +248,7 @@ public class ConsoleConsumerTest {
|
|||
|
||||
consumer = new ConsoleConsumer.ConsumerWrapper(new ConsoleConsumerOptions(args), mockConsumer);
|
||||
|
||||
verify(mockConsumer).assign(eq(Collections.singletonList(tp0)));
|
||||
verify(mockConsumer).assign(eq(List.of(tp0)));
|
||||
verify(mockConsumer).seek(eq(tp0), eq(123L));
|
||||
consumer.cleanup();
|
||||
reset(mockConsumer);
|
||||
|
@ -264,8 +263,8 @@ public class ConsoleConsumerTest {
|
|||
|
||||
consumer = new ConsoleConsumer.ConsumerWrapper(new ConsoleConsumerOptions(args), mockConsumer);
|
||||
|
||||
verify(mockConsumer).assign(eq(Collections.singletonList(tp0)));
|
||||
verify(mockConsumer).seekToBeginning(eq(Collections.singletonList(tp0)));
|
||||
verify(mockConsumer).assign(eq(List.of(tp0)));
|
||||
verify(mockConsumer).seekToBeginning(eq(List.of(tp0)));
|
||||
consumer.cleanup();
|
||||
reset(mockConsumer);
|
||||
}
|
||||
|
@ -295,7 +294,7 @@ public class ConsoleConsumerTest {
|
|||
try (Admin admin = cluster.admin()) {
|
||||
|
||||
NewTopic newTopic = new NewTopic(topic, 1, (short) 1);
|
||||
admin.createTopics(singleton(newTopic));
|
||||
admin.createTopics(Set.of(newTopic));
|
||||
produceMessagesWithTxn(cluster);
|
||||
|
||||
String[] transactionLogMessageFormatter = createConsoleConsumerArgs(cluster,
|
||||
|
@ -334,7 +333,7 @@ public class ConsoleConsumerTest {
|
|||
try (Admin admin = cluster.admin()) {
|
||||
|
||||
NewTopic newTopic = new NewTopic(topic, 1, (short) 1);
|
||||
admin.createTopics(singleton(newTopic));
|
||||
admin.createTopics(Set.of(newTopic));
|
||||
produceMessages(cluster);
|
||||
|
||||
String[] offsetsMessageFormatter = createConsoleConsumerArgs(cluster,
|
||||
|
@ -376,7 +375,7 @@ public class ConsoleConsumerTest {
|
|||
try (Admin admin = cluster.admin()) {
|
||||
|
||||
NewTopic newTopic = new NewTopic(topic, 1, (short) 1);
|
||||
admin.createTopics(singleton(newTopic));
|
||||
admin.createTopics(Set.of(newTopic));
|
||||
produceMessages(cluster);
|
||||
|
||||
String[] groupMetadataMessageFormatter = createConsoleConsumerArgs(cluster,
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.junit.jupiter.api.Test;
|
|||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
|
@ -116,7 +115,7 @@ public class ConsoleShareConsumerOptionsTest {
|
|||
});
|
||||
|
||||
// different in all three places
|
||||
File propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "group-from-file"));
|
||||
File propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
|
||||
final String[] args = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
@ -128,7 +127,7 @@ public class ConsoleShareConsumerOptionsTest {
|
|||
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
|
||||
|
||||
// the same in all three places
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "test-group"));
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "test-group"));
|
||||
final String[] args1 = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
@ -142,7 +141,7 @@ public class ConsoleShareConsumerOptionsTest {
|
|||
assertEquals("test-group", props.getProperty("group.id"));
|
||||
|
||||
// different via --consumer-property and --consumer-config
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "group-from-file"));
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
|
||||
final String[] args2 = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
@ -163,7 +162,7 @@ public class ConsoleShareConsumerOptionsTest {
|
|||
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args3));
|
||||
|
||||
// different via --group and --consumer-config
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Collections.singletonMap("group.id", "group-from-file"));
|
||||
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
|
||||
final String[] args4 = new String[]{
|
||||
"--bootstrap-server", "localhost:9092",
|
||||
"--topic", "test",
|
||||
|
|
|
@ -28,10 +28,10 @@ import org.junit.jupiter.params.provider.MethodSource;
|
|||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.PrintStream;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
|
||||
|
@ -59,7 +59,7 @@ public abstract class CoordinatorRecordMessageFormatterTest {
|
|||
);
|
||||
|
||||
try (MessageFormatter formatter = formatter()) {
|
||||
formatter.configure(emptyMap());
|
||||
formatter.configure(Map.of());
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
formatter.writeTo(record, new PrintStream(out));
|
||||
assertEquals(expectedOutput.replaceAll("\\s+", ""), out.toString());
|
||||
|
|
|
@ -24,10 +24,9 @@ import org.apache.kafka.coordinator.group.generated.OffsetCommitValue;
|
|||
|
||||
import org.junit.jupiter.params.provider.Arguments;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
|
||||
public class GroupMetadataMessageFormatterTest extends CoordinatorRecordMessageFormatterTest {
|
||||
|
||||
private static final OffsetCommitKey OFFSET_COMMIT_KEY = new OffsetCommitKey()
|
||||
|
@ -56,7 +55,7 @@ public class GroupMetadataMessageFormatterTest extends CoordinatorRecordMessageF
|
|||
.setGeneration(1)
|
||||
.setProtocol("range")
|
||||
.setLeader("leader")
|
||||
.setMembers(singletonList(MEMBER_METADATA))
|
||||
.setMembers(List.of(MEMBER_METADATA))
|
||||
.setCurrentStateTimestamp(1234L);
|
||||
|
||||
@Override
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.apache.kafka.coordinator.group.generated.OffsetCommitValue;
|
|||
|
||||
import org.junit.jupiter.params.provider.Arguments;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class OffsetMessageFormatterTest extends CoordinatorRecordMessageFormatterTest {
|
||||
|
@ -46,7 +46,7 @@ public class OffsetMessageFormatterTest extends CoordinatorRecordMessageFormatte
|
|||
.setGeneration(1)
|
||||
.setProtocol("range")
|
||||
.setLeader("leader")
|
||||
.setMembers(Collections.emptyList());
|
||||
.setMembers(List.of());
|
||||
|
||||
@Override
|
||||
protected CoordinatorRecordMessageFormatter formatter() {
|
||||
|
|
|
@ -22,10 +22,9 @@ import org.apache.kafka.coordinator.transaction.generated.TransactionLogValue;
|
|||
|
||||
import org.junit.jupiter.params.provider.Arguments;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
|
||||
public class TransactionLogMessageFormatterTest extends CoordinatorRecordMessageFormatterTest {
|
||||
|
||||
private static final TransactionLogKey TXN_LOG_KEY = new TransactionLogKey()
|
||||
|
@ -37,7 +36,7 @@ public class TransactionLogMessageFormatterTest extends CoordinatorRecordMessage
|
|||
.setTransactionStartTimestampMs(750L)
|
||||
.setTransactionLastUpdateTimestampMs(1000L)
|
||||
.setTransactionTimeoutMs(500)
|
||||
.setTransactionPartitions(emptyList());
|
||||
.setTransactionPartitions(List.of());
|
||||
|
||||
@Override
|
||||
protected CoordinatorRecordMessageFormatter formatter() {
|
||||
|
|
|
@ -23,7 +23,8 @@ import org.apache.kafka.common.errors.GroupIdNotFoundException;
|
|||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import scala.jdk.javaapi.CollectionConverters;
|
||||
|
@ -36,11 +37,11 @@ import static org.junit.jupiter.api.Assertions.fail;
|
|||
public class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest {
|
||||
@Test
|
||||
public void testDescribeGroupCliWithGroupDescribe() throws Exception {
|
||||
addAndVerifyAcls(CollectionConverters.asScala(Collections.singleton(new AccessControlEntry(ClientPrincipal().toString(), "*", DESCRIBE, ALLOW))).toSet(), groupResource());
|
||||
addAndVerifyAcls(CollectionConverters.asScala(Set.of(new AccessControlEntry(ClientPrincipal().toString(), "*", DESCRIBE, ALLOW))).toSet(), groupResource());
|
||||
|
||||
String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServers(listenerName()), "--describe", "--group", group()};
|
||||
ConsumerGroupCommandOptions opts = ConsumerGroupCommandOptions.fromArgs(cgcArgs);
|
||||
try (ConsumerGroupCommand.ConsumerGroupService consumerGroupService = new ConsumerGroupCommand.ConsumerGroupService(opts, Collections.emptyMap())) {
|
||||
try (ConsumerGroupCommand.ConsumerGroupService consumerGroupService = new ConsumerGroupCommand.ConsumerGroupService(opts, Map.of())) {
|
||||
consumerGroupService.describeGroups();
|
||||
fail("Non-existent group should throw an exception");
|
||||
} catch (ExecutionException e) {
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.apache.kafka.common.utils.Utils;
|
|||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
@ -55,7 +54,7 @@ class ConsumerGroupCommandTestUtils {
|
|||
String topic,
|
||||
Supplier<KafkaConsumer<T, T>> consumerSupplier) {
|
||||
return buildConsumers(numberOfConsumers, syncCommit, consumerSupplier,
|
||||
consumer -> consumer.subscribe(Collections.singleton(topic)));
|
||||
consumer -> consumer.subscribe(Set.of(topic)));
|
||||
}
|
||||
|
||||
static <T> AutoCloseable buildConsumers(
|
||||
|
|
|
@ -44,9 +44,7 @@ import org.mockito.ArgumentMatcher;
|
|||
import org.mockito.ArgumentMatchers;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -73,11 +71,11 @@ public class ConsumerGroupServiceTest {
|
|||
|
||||
public static final int NUM_PARTITIONS = 10;
|
||||
|
||||
private static final List<String> TOPICS = IntStream.range(0, 5).mapToObj(i -> "testTopic" + i).collect(Collectors.toList());
|
||||
private static final List<String> TOPICS = IntStream.range(0, 5).mapToObj(i -> "testTopic" + i).toList();
|
||||
|
||||
private static final List<TopicPartition> TOPIC_PARTITIONS = TOPICS.stream()
|
||||
.flatMap(topic -> IntStream.range(0, NUM_PARTITIONS).mapToObj(i -> new TopicPartition(topic, i)))
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
|
||||
private final Admin admin = mock(Admin.class);
|
||||
|
||||
|
@ -86,7 +84,7 @@ public class ConsumerGroupServiceTest {
|
|||
String[] args = new String[]{"--bootstrap-server", "localhost:9092", "--group", GROUP, "--describe", "--offsets"};
|
||||
ConsumerGroupCommand.ConsumerGroupService groupService = consumerGroupService(args);
|
||||
|
||||
when(admin.describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(GROUP)), any()))
|
||||
when(admin.describeConsumerGroups(ArgumentMatchers.eq(List.of(GROUP)), any()))
|
||||
.thenReturn(describeGroupsResult(GroupState.STABLE));
|
||||
when(admin.listConsumerGroupOffsets(ArgumentMatchers.eq(listConsumerGroupOffsetsSpec()), any()))
|
||||
.thenReturn(listGroupOffsetsResult(GROUP));
|
||||
|
@ -100,7 +98,7 @@ public class ConsumerGroupServiceTest {
|
|||
assertTrue(statesAndAssignments.getValue().isPresent());
|
||||
assertEquals(TOPIC_PARTITIONS.size(), statesAndAssignments.getValue().get().size());
|
||||
|
||||
verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(GROUP)), any());
|
||||
verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(List.of(GROUP)), any());
|
||||
verify(admin, times(1)).listConsumerGroupOffsets(ArgumentMatchers.eq(listConsumerGroupOffsetsSpec()), any());
|
||||
verify(admin, times(1)).listOffsets(offsetsArgMatcher(), any());
|
||||
}
|
||||
|
@ -142,7 +140,7 @@ public class ConsumerGroupServiceTest {
|
|||
|
||||
ConsumerGroupDescription consumerGroupDescription = new ConsumerGroupDescription(GROUP,
|
||||
true,
|
||||
Collections.singleton(
|
||||
Set.of(
|
||||
new MemberDescription(
|
||||
"member1", Optional.of("instance1"), "client1", "host1", new MemberAssignment(assignedTopicPartitions),
|
||||
Optional.empty(), Optional.empty(), Optional.empty()
|
||||
|
@ -161,12 +159,12 @@ public class ConsumerGroupServiceTest {
|
|||
|
||||
KafkaFutureImpl<ConsumerGroupDescription> future = new KafkaFutureImpl<>();
|
||||
future.complete(consumerGroupDescription);
|
||||
when(admin.describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(GROUP)), any()))
|
||||
.thenReturn(new DescribeConsumerGroupsResult(Collections.singletonMap(GROUP, future)));
|
||||
when(admin.describeConsumerGroups(ArgumentMatchers.eq(List.of(GROUP)), any()))
|
||||
.thenReturn(new DescribeConsumerGroupsResult(Map.of(GROUP, future)));
|
||||
when(admin.listConsumerGroupOffsets(ArgumentMatchers.eq(listConsumerGroupOffsetsSpec()), any()))
|
||||
.thenReturn(
|
||||
AdminClientTestUtils.listConsumerGroupOffsetsResult(
|
||||
Collections.singletonMap(GROUP, committedOffsets)));
|
||||
Map.of(GROUP, committedOffsets)));
|
||||
when(admin.listOffsets(
|
||||
ArgumentMatchers.argThat(offsetsArgMatcher.apply(assignedTopicPartitions)),
|
||||
any()
|
||||
|
@ -187,12 +185,12 @@ public class ConsumerGroupServiceTest {
|
|||
results.stream().collect(Collectors.toMap(
|
||||
assignment -> new TopicPartition(assignment.topic.get(), assignment.partition.get()),
|
||||
assignment -> assignment.offset))
|
||||
).orElse(Collections.emptyMap());
|
||||
).orElse(Map.of());
|
||||
Map<TopicPartition, Optional<Integer>> returnedLeaderEpoch = assignments.map(results ->
|
||||
results.stream().collect(Collectors.toMap(
|
||||
assignment -> new TopicPartition(assignment.topic.get(), assignment.partition.get()),
|
||||
assignment -> assignment.leaderEpoch))
|
||||
).orElse(Collections.emptyMap());
|
||||
).orElse(Map.of());
|
||||
|
||||
Map<TopicPartition, Optional<Long>> expectedOffsets = Map.of(
|
||||
testTopicPartition0, Optional.empty(),
|
||||
|
@ -215,7 +213,7 @@ public class ConsumerGroupServiceTest {
|
|||
assertEquals(expectedOffsets, returnedOffsets);
|
||||
assertEquals(expectedLeaderEpoch, returnedLeaderEpoch);
|
||||
|
||||
verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(GROUP)), any());
|
||||
verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(List.of(GROUP)), any());
|
||||
verify(admin, times(1)).listConsumerGroupOffsets(ArgumentMatchers.eq(listConsumerGroupOffsetsSpec()), any());
|
||||
verify(admin, times(1)).listOffsets(ArgumentMatchers.argThat(offsetsArgMatcher.apply(assignedTopicPartitions)), any());
|
||||
verify(admin, times(1)).listOffsets(ArgumentMatchers.argThat(offsetsArgMatcher.apply(unassignedTopicPartitions)), any());
|
||||
|
@ -223,15 +221,15 @@ public class ConsumerGroupServiceTest {
|
|||
|
||||
@Test
|
||||
public void testAdminRequestsForResetOffsets() {
|
||||
List<String> args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", GROUP, "--reset-offsets", "--to-latest"));
|
||||
List<String> args = new ArrayList<>(List.of("--bootstrap-server", "localhost:9092", "--group", GROUP, "--reset-offsets", "--to-latest"));
|
||||
List<String> topicsWithoutPartitionsSpecified = TOPICS.subList(1, TOPICS.size());
|
||||
List<String> topicArgs = new ArrayList<>(Arrays.asList("--topic", TOPICS.get(0) + ":" + (IntStream.range(0, NUM_PARTITIONS).mapToObj(Integer::toString).collect(Collectors.joining(",")))));
|
||||
topicsWithoutPartitionsSpecified.forEach(topic -> topicArgs.addAll(Arrays.asList("--topic", topic)));
|
||||
List<String> topicArgs = new ArrayList<>(List.of("--topic", TOPICS.get(0) + ":" + (IntStream.range(0, NUM_PARTITIONS).mapToObj(Integer::toString).collect(Collectors.joining(",")))));
|
||||
topicsWithoutPartitionsSpecified.forEach(topic -> topicArgs.addAll(List.of("--topic", topic)));
|
||||
|
||||
args.addAll(topicArgs);
|
||||
ConsumerGroupCommand.ConsumerGroupService groupService = consumerGroupService(args.toArray(new String[0]));
|
||||
|
||||
when(admin.describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(GROUP)), any()))
|
||||
when(admin.describeConsumerGroups(ArgumentMatchers.eq(List.of(GROUP)), any()))
|
||||
.thenReturn(describeGroupsResult(GroupState.DEAD));
|
||||
when(admin.describeTopics(ArgumentMatchers.eq(topicsWithoutPartitionsSpecified), any()))
|
||||
.thenReturn(describeTopicsResult(topicsWithoutPartitionsSpecified));
|
||||
|
@ -241,16 +239,16 @@ public class ConsumerGroupServiceTest {
|
|||
.thenReturn(listOffsetsResult());
|
||||
|
||||
Map<String, Map<TopicPartition, OffsetAndMetadata>> resetResult = groupService.resetOffsets();
|
||||
assertEquals(Collections.singleton(GROUP), resetResult.keySet());
|
||||
assertEquals(Set.of(GROUP), resetResult.keySet());
|
||||
assertEquals(new HashSet<>(TOPIC_PARTITIONS), resetResult.get(GROUP).keySet());
|
||||
|
||||
verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(Collections.singletonList(GROUP)), any());
|
||||
verify(admin, times(1)).describeConsumerGroups(ArgumentMatchers.eq(List.of(GROUP)), any());
|
||||
verify(admin, times(1)).describeTopics(ArgumentMatchers.eq(topicsWithoutPartitionsSpecified), any());
|
||||
verify(admin, times(1)).listOffsets(offsetsArgMatcher(), any());
|
||||
}
|
||||
|
||||
private ConsumerGroupCommand.ConsumerGroupService consumerGroupService(String[] args) {
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(ConsumerGroupCommandOptions.fromArgs(args), Collections.emptyMap()) {
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(ConsumerGroupCommandOptions.fromArgs(args), Map.of()) {
|
||||
@Override
|
||||
protected Admin createAdminClient(Map<String, String> configOverrides) {
|
||||
return admin;
|
||||
|
@ -265,7 +263,7 @@ public class ConsumerGroupServiceTest {
|
|||
Optional.empty(), Optional.empty(), Optional.empty());
|
||||
ConsumerGroupDescription description = new ConsumerGroupDescription(GROUP,
|
||||
true,
|
||||
Collections.singleton(member1),
|
||||
Set.of(member1),
|
||||
RangeAssignor.class.getName(),
|
||||
GroupType.CLASSIC,
|
||||
groupState,
|
||||
|
@ -275,14 +273,14 @@ public class ConsumerGroupServiceTest {
|
|||
Optional.empty());
|
||||
KafkaFutureImpl<ConsumerGroupDescription> future = new KafkaFutureImpl<>();
|
||||
future.complete(description);
|
||||
return new DescribeConsumerGroupsResult(Collections.singletonMap(GROUP, future));
|
||||
return new DescribeConsumerGroupsResult(Map.of(GROUP, future));
|
||||
}
|
||||
|
||||
private ListConsumerGroupOffsetsResult listGroupOffsetsResult(String groupId) {
|
||||
Map<TopicPartition, OffsetAndMetadata> offsets = TOPIC_PARTITIONS.stream().collect(Collectors.toMap(
|
||||
Function.identity(),
|
||||
__ -> new OffsetAndMetadata(100)));
|
||||
return AdminClientTestUtils.listConsumerGroupOffsetsResult(Collections.singletonMap(groupId, offsets));
|
||||
return AdminClientTestUtils.listConsumerGroupOffsetsResult(Map.of(groupId, offsets));
|
||||
}
|
||||
|
||||
private Map<TopicPartition, OffsetSpec> offsetsArgMatcher() {
|
||||
|
@ -316,18 +314,14 @@ public class ConsumerGroupServiceTest {
|
|||
}
|
||||
|
||||
private DescribeTopicsResult describeTopicsResult(Collection<String> topics) {
|
||||
Map<String, TopicDescription> topicDescriptions = new HashMap<>();
|
||||
|
||||
topics.forEach(topic -> {
|
||||
List<TopicPartitionInfo> partitions = IntStream.range(0, NUM_PARTITIONS)
|
||||
.mapToObj(i -> new TopicPartitionInfo(i, Node.noNode(), Collections.emptyList(), Collections.emptyList()))
|
||||
.collect(Collectors.toList());
|
||||
topicDescriptions.put(topic, new TopicDescription(topic, false, partitions));
|
||||
});
|
||||
var topicDescriptions = topics.stream().collect(Collectors.toMap(Function.identity(),
|
||||
topic -> new TopicDescription(topic, false, IntStream.range(0, NUM_PARTITIONS)
|
||||
.mapToObj(i -> new TopicPartitionInfo(i, Node.noNode(), List.of(), List.of()))
|
||||
.toList())));
|
||||
return AdminClientTestUtils.describeTopicsResult(topicDescriptions);
|
||||
}
|
||||
|
||||
private Map<String, ListConsumerGroupOffsetsSpec> listConsumerGroupOffsetsSpec() {
|
||||
return Collections.singletonMap(GROUP, new ListConsumerGroupOffsetsSpec());
|
||||
return Map.of(GROUP, new ListConsumerGroupOffsetsSpec());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,8 +47,6 @@ import java.util.stream.IntStream;
|
|||
|
||||
import joptsimple.OptionException;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG;
|
||||
|
@ -313,7 +311,7 @@ public class DeleteConsumerGroupsTest {
|
|||
cluster,
|
||||
groupId,
|
||||
protocol.name,
|
||||
emptyMap());
|
||||
Map.of());
|
||||
|
||||
return ConsumerGroupCommandTestUtils.buildConsumers(
|
||||
1,
|
||||
|
@ -331,7 +329,7 @@ public class DeleteConsumerGroupsTest {
|
|||
ConsumerGroupCommandOptions opts = ConsumerGroupCommandOptions.fromArgs(args);
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(
|
||||
opts,
|
||||
singletonMap(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -41,8 +41,8 @@ import org.apache.kafka.coordinator.group.GroupCoordinatorConfig;
|
|||
import org.junit.jupiter.api.Assertions;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
|
@ -80,7 +80,7 @@ public class DeleteOffsetsConsumerGroupCommandIntegrationTest {
|
|||
String group = "missing.group";
|
||||
String topic = "foo:1";
|
||||
try (ConsumerGroupCommand.ConsumerGroupService consumerGroupService = consumerGroupService(getArgs(group, topic))) {
|
||||
Entry<Errors, Map<TopicPartition, Throwable>> res = consumerGroupService.deleteOffsets(group, Collections.singletonList(topic));
|
||||
Entry<Errors, Map<TopicPartition, Throwable>> res = consumerGroupService.deleteOffsets(group, List.of(topic));
|
||||
assertEquals(Errors.GROUP_ID_NOT_FOUND, res.getKey());
|
||||
}
|
||||
}
|
||||
|
@ -185,7 +185,7 @@ public class DeleteOffsetsConsumerGroupCommandIntegrationTest {
|
|||
private static ConsumerGroupCommand.ConsumerGroupService consumerGroupService(String[] args) {
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(
|
||||
ConsumerGroupCommandOptions.fromArgs(args),
|
||||
Collections.singletonMap(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -197,7 +197,7 @@ public class DeleteOffsetsConsumerGroupCommandIntegrationTest {
|
|||
return () -> {
|
||||
String topic = inputPartition >= 0 ? inputTopic + ":" + inputPartition : inputTopic;
|
||||
try (ConsumerGroupCommand.ConsumerGroupService consumerGroupService = consumerGroupService(getArgs(inputGroup, topic))) {
|
||||
Entry<Errors, Map<TopicPartition, Throwable>> res = consumerGroupService.deleteOffsets(inputGroup, Collections.singletonList(topic));
|
||||
Entry<Errors, Map<TopicPartition, Throwable>> res = consumerGroupService.deleteOffsets(inputGroup, List.of(topic));
|
||||
Errors topLevelError = res.getKey();
|
||||
Map<TopicPartition, Throwable> partitions = res.getValue();
|
||||
TopicPartition tp = new TopicPartition(inputTopic, expectedPartition);
|
||||
|
@ -219,7 +219,7 @@ public class DeleteOffsetsConsumerGroupCommandIntegrationTest {
|
|||
Runnable validateRunnable) {
|
||||
produceRecord(inputTopic);
|
||||
try (Consumer<byte[], byte[]> consumer = createConsumer(inputGroup, groupProtocol)) {
|
||||
consumer.subscribe(Collections.singletonList(inputTopic));
|
||||
consumer.subscribe(List.of(inputTopic));
|
||||
ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(DEFAULT_MAX_WAIT_MS));
|
||||
Assertions.assertNotEquals(0, records.count());
|
||||
consumer.commitSync();
|
||||
|
@ -260,14 +260,14 @@ public class DeleteOffsetsConsumerGroupCommandIntegrationTest {
|
|||
}
|
||||
|
||||
private void createTopic(String topic) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.createTopics(Collections.singletonList(new NewTopic(topic, 1, (short) 1))).topicId(topic).get());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.createTopics(List.of(new NewTopic(topic, 1, (short) 1))).topicId(topic).get());
|
||||
}
|
||||
}
|
||||
|
||||
private void removeTopic(String topic) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(Collections.singletonList(topic)).all());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(List.of(topic)).all());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,7 +52,6 @@ import org.junit.jupiter.api.Timeout;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -65,7 +64,6 @@ import java.util.concurrent.ExecutionException;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
|
@ -106,7 +104,7 @@ public class DescribeConsumerGroupTest {
|
|||
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
// note the group to be queried is a different (non-existing) group
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup));
|
||||
cgcArgs.addAll(describeType);
|
||||
try (ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))) {
|
||||
service.describeGroups();
|
||||
|
@ -130,7 +128,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
// note the group to be queried is a different (non-existing) group
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup})
|
||||
) {
|
||||
|
@ -151,7 +149,7 @@ public class DescribeConsumerGroupTest {
|
|||
String topic = TOPIC_PREFIX + groupProtocol.name();
|
||||
String group = GROUP_PREFIX + groupProtocol.name();
|
||||
createTopic(topic);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
// note the group to be queried is a different (non-existing) group
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup})
|
||||
) {
|
||||
|
@ -172,7 +170,7 @@ public class DescribeConsumerGroupTest {
|
|||
String topic = TOPIC_PREFIX + groupProtocol.name();
|
||||
String group = GROUP_PREFIX + groupProtocol.name();
|
||||
createTopic(topic);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
// note the group to be queried is a different (non-existing) group
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", missingGroup})
|
||||
) {
|
||||
|
@ -295,7 +293,7 @@ public class DescribeConsumerGroupTest {
|
|||
);
|
||||
|
||||
try (AutoCloseable consumerConsumer = consumerGroupClosable(GroupProtocol.CONSUMER, group, topic, Map.of(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId));
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]));
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
Entry<String, String> res = ToolsTestUtils.grabConsoleOutputAndError(describeGroups(service));
|
||||
|
@ -378,14 +376,14 @@ public class DescribeConsumerGroupTest {
|
|||
List<String> groups = new ArrayList<>();
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
|
||||
groups.addAll(Arrays.asList("--group", group));
|
||||
protocolConsumerGroupExecutors.add(consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap()));
|
||||
groups.addAll(List.of("--group", group));
|
||||
protocolConsumerGroupExecutors.add(consumerGroupClosable(groupProtocol, group, topic, Map.of()));
|
||||
}
|
||||
|
||||
int expectedNumLines = DESCRIBE_TYPES.size() * 2;
|
||||
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe"));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe"));
|
||||
cgcArgs.addAll(groups);
|
||||
cgcArgs.addAll(describeType);
|
||||
try (ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))) {
|
||||
|
@ -420,7 +418,7 @@ public class DescribeConsumerGroupTest {
|
|||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
String group = GROUP_PREFIX + groupProtocol.name() + "." + String.join("", describeType);
|
||||
groups.add(group);
|
||||
protocolConsumerGroupExecutors.add(consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap()));
|
||||
protocolConsumerGroupExecutors.add(consumerGroupClosable(groupProtocol, group, topic, Map.of()));
|
||||
}
|
||||
int expectedNumLines = DESCRIBE_TYPES.size() * 2;
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
|
@ -456,7 +454,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -496,12 +494,12 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group});
|
||||
Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))
|
||||
Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Collections.singleton(group)).describedGroups().get(group).get();
|
||||
ConsumerGroupDescription consumerGroupDescription = admin.describeConsumerGroups(Set.of(group)).describedGroups().get(group).get();
|
||||
return consumerGroupDescription.members().size() == 1 && consumerGroupDescription.members().iterator().next().assignment().topicPartitions().size() == 1;
|
||||
}, "Expected a 'Stable' group status, rows and valid member information for group " + group + ".");
|
||||
|
||||
|
@ -523,7 +521,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.singletonMap(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, groupProtocol == GroupProtocol.CONSUMER ? "range" : ""));
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, groupProtocol == GroupProtocol.CONSUMER ? "range" : ""));
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -550,10 +548,10 @@ public class DescribeConsumerGroupTest {
|
|||
try {
|
||||
String expectedName;
|
||||
if (groupProtocol.equals(GroupProtocol.CONSUMER)) {
|
||||
protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CONSUMER, group, topic, Collections.singletonMap(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "range"));
|
||||
protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CONSUMER, group, topic, Map.of(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "range"));
|
||||
expectedName = RangeAssignor.RANGE_ASSIGNOR_NAME;
|
||||
} else {
|
||||
protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, topic, Collections.singletonMap(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RoundRobinAssignor.class.getName()));
|
||||
protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, topic, Map.of(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RoundRobinAssignor.class.getName()));
|
||||
expectedName = RoundRobinAssignor.ROUNDROBIN_ASSIGNOR_NAME;
|
||||
}
|
||||
|
||||
|
@ -587,7 +585,7 @@ public class DescribeConsumerGroupTest {
|
|||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
|
||||
cgcArgs.addAll(describeType);
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -615,7 +613,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -631,7 +629,7 @@ public class DescribeConsumerGroupTest {
|
|||
Entry<Optional<GroupState>, Optional<Collection<PartitionAssignmentState>>> offsets = service.collectGroupOffsets(group);
|
||||
Optional<GroupState> state = offsets.getKey();
|
||||
Optional<Collection<PartitionAssignmentState>> assignments = offsets.getValue();
|
||||
List<PartitionAssignmentState> testGroupAssignments = assignments.get().stream().filter(a -> Objects.equals(a.group, group)).collect(Collectors.toList());
|
||||
List<PartitionAssignmentState> testGroupAssignments = assignments.get().stream().filter(a -> Objects.equals(a.group, group)).toList();
|
||||
PartitionAssignmentState assignment = testGroupAssignments.get(0);
|
||||
return state.map(s -> s.equals(GroupState.EMPTY)).orElse(false) &&
|
||||
testGroupAssignments.size() == 1 &&
|
||||
|
@ -652,7 +650,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -681,7 +679,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -712,10 +710,10 @@ public class DescribeConsumerGroupTest {
|
|||
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
|
||||
cgcArgs.addAll(describeType);
|
||||
// run two consumers in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -739,7 +737,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run two consumers in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -762,7 +760,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run two consumers in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -792,7 +790,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run two consumers in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -812,10 +810,10 @@ public class DescribeConsumerGroupTest {
|
|||
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group));
|
||||
cgcArgs.addAll(describeType);
|
||||
// run two consumers in the group consuming from a two-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -839,7 +837,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic, 2);
|
||||
|
||||
// run two consumers in the group consuming from a two-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -863,7 +861,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic, 2);
|
||||
|
||||
// run two consumers in the group consuming from a two-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -891,7 +889,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic, 2);
|
||||
|
||||
// run two consumers in the group consuming from a two-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap(), 2);
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(), 2);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -911,7 +909,7 @@ public class DescribeConsumerGroupTest {
|
|||
String group = GROUP_PREFIX + groupProtocol.name();
|
||||
createTopic(topic, 2);
|
||||
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, Set.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1)), Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, Set.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1)), Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -936,11 +934,11 @@ public class DescribeConsumerGroupTest {
|
|||
String group = GROUP_PREFIX + groupProtocol.name() + String.join("", describeType);
|
||||
|
||||
// set the group initialization timeout too low for the group to stabilize
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--timeout", "1", "--group", group));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--timeout", "1", "--group", group));
|
||||
cgcArgs.addAll(describeType);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(cgcArgs.toArray(new String[0]))
|
||||
) {
|
||||
ExecutionException e = assertThrows(ExecutionException.class, service::describeGroups);
|
||||
|
@ -961,7 +959,7 @@ public class DescribeConsumerGroupTest {
|
|||
// complete before the timeout expires
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
// set the group initialization timeout too low for the group to stabilize
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--timeout", "1"})
|
||||
) {
|
||||
|
@ -983,7 +981,7 @@ public class DescribeConsumerGroupTest {
|
|||
// complete before the timeout expires
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
// set the group initialization timeout too low for the group to stabilize
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--timeout", "1"})
|
||||
) {
|
||||
|
@ -1007,7 +1005,7 @@ public class DescribeConsumerGroupTest {
|
|||
// complete before the timeout expires
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.emptyMap());
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of());
|
||||
// set the group initialization timeout too low for the group to stabilize
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group, "--timeout", "1"})
|
||||
) {
|
||||
|
@ -1026,7 +1024,7 @@ public class DescribeConsumerGroupTest {
|
|||
createTopic(topic);
|
||||
|
||||
// run one consumer in the group consuming from a single-partition topic
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"));
|
||||
try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, group, topic, Map.of(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"));
|
||||
ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group})
|
||||
) {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -1175,7 +1173,7 @@ public class DescribeConsumerGroupTest {
|
|||
private static ConsumerGroupCommand.ConsumerGroupService consumerGroupService(String[] args) {
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(
|
||||
ConsumerGroupCommandOptions.fromArgs(args),
|
||||
Collections.singletonMap(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -1184,20 +1182,20 @@ public class DescribeConsumerGroupTest {
|
|||
}
|
||||
|
||||
private void createTopic(String topic, int numPartitions) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.createTopics(Collections.singletonList(new NewTopic(topic, numPartitions, (short) 1))).topicId(topic).get());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.createTopics(List.of(new NewTopic(topic, numPartitions, (short) 1))).topicId(topic).get());
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteConsumerGroups(Collection<String> groupIds) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteConsumerGroups(groupIds).all().get());
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteTopic(String topic) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(Collections.singletonList(topic)).topicNameValues().get(topic).get());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(List.of(topic)).topicNameValues().get(topic).get());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -43,7 +43,6 @@ import org.junit.jupiter.api.Test;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
|
@ -53,11 +52,9 @@ import java.util.Objects;
|
|||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import joptsimple.OptionException;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG;
|
||||
import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_PROTOCOL_CONFIG;
|
||||
|
@ -105,12 +102,12 @@ public class ListConsumerGroupTest {
|
|||
String topicPartitionsGroup = TOPIC_PARTITIONS_GROUP_PREFIX + i;
|
||||
createTopic(topic);
|
||||
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Collections.singleton(new TopicPartition(topic, 0)));
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Set.of(new TopicPartition(topic, 0)));
|
||||
AutoCloseable topicConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, topicGroup, topic);
|
||||
AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, protocolGroup, topic);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--list"})
|
||||
) {
|
||||
Set<String> expectedGroups = set(Arrays.asList(topicPartitionsGroup, topicGroup, protocolGroup));
|
||||
Set<String> expectedGroups = set(List.of(topicPartitionsGroup, topicGroup, protocolGroup));
|
||||
final AtomicReference<Set> foundGroups = new AtomicReference<>();
|
||||
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -119,7 +116,7 @@ public class ListConsumerGroupTest {
|
|||
}, "Expected --list to show groups " + expectedGroups + ", but found " + foundGroups.get() + ".");
|
||||
}
|
||||
|
||||
removeConsumer(set(Arrays.asList(topicPartitionsGroup, topicGroup, protocolGroup)));
|
||||
removeConsumer(set(List.of(topicPartitionsGroup, topicGroup, protocolGroup)));
|
||||
deleteTopic(topic);
|
||||
}
|
||||
}
|
||||
|
@ -139,7 +136,7 @@ public class ListConsumerGroupTest {
|
|||
String topicPartitionsGroup = TOPIC_PARTITIONS_GROUP_PREFIX + i;
|
||||
createTopic(topic);
|
||||
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Collections.singleton(new TopicPartition(topic, 0)));
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Set.of(new TopicPartition(topic, 0)));
|
||||
AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, protocolGroup, topic);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state"})
|
||||
) {
|
||||
|
@ -160,7 +157,7 @@ public class ListConsumerGroupTest {
|
|||
|
||||
assertGroupListing(
|
||||
service,
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
EnumSet.allOf(GroupState.class),
|
||||
expectedListing
|
||||
);
|
||||
|
@ -176,20 +173,20 @@ public class ListConsumerGroupTest {
|
|||
|
||||
assertGroupListing(
|
||||
service,
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
Set.of(GroupState.STABLE),
|
||||
expectedListing
|
||||
);
|
||||
|
||||
assertGroupListing(
|
||||
service,
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
Set.of(GroupState.PREPARING_REBALANCE),
|
||||
Collections.emptySet()
|
||||
Set.of()
|
||||
);
|
||||
}
|
||||
|
||||
removeConsumer(set(Arrays.asList(topicPartitionsGroup, protocolGroup)));
|
||||
removeConsumer(set(List.of(topicPartitionsGroup, protocolGroup)));
|
||||
deleteTopic(topic);
|
||||
}
|
||||
}
|
||||
|
@ -202,7 +199,7 @@ public class ListConsumerGroupTest {
|
|||
String topicPartitionsGroup = TOPIC_PARTITIONS_GROUP_PREFIX + "0";
|
||||
createTopic(topic);
|
||||
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Collections.singleton(new TopicPartition(topic, 0)));
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Set.of(new TopicPartition(topic, 0)));
|
||||
AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, protocolGroup, topic);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state"})
|
||||
) {
|
||||
|
@ -224,8 +221,8 @@ public class ListConsumerGroupTest {
|
|||
// No filters explicitly mentioned. Expectation is that all groups are returned.
|
||||
assertGroupListing(
|
||||
service,
|
||||
Collections.emptySet(),
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
Set.of(),
|
||||
expectedListing
|
||||
);
|
||||
|
||||
|
@ -235,14 +232,14 @@ public class ListConsumerGroupTest {
|
|||
assertGroupListing(
|
||||
service,
|
||||
Set.of(GroupType.CONSUMER),
|
||||
Collections.emptySet(),
|
||||
Collections.emptySet()
|
||||
Set.of(),
|
||||
Set.of()
|
||||
);
|
||||
|
||||
assertGroupListing(
|
||||
service,
|
||||
Set.of(GroupType.CLASSIC),
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
expectedListing
|
||||
);
|
||||
}
|
||||
|
@ -257,7 +254,7 @@ public class ListConsumerGroupTest {
|
|||
String topicPartitionsGroup = TOPIC_PARTITIONS_GROUP_PREFIX + "0";
|
||||
createTopic(topic);
|
||||
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Collections.singleton(new TopicPartition(topic, 0)));
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Set.of(new TopicPartition(topic, 0)));
|
||||
AutoCloseable topicConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, topicGroup, topic);
|
||||
AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, protocolGroup, topic);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--list"})
|
||||
|
@ -288,8 +285,8 @@ public class ListConsumerGroupTest {
|
|||
|
||||
assertGroupListing(
|
||||
service,
|
||||
Collections.emptySet(),
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
Set.of(),
|
||||
expectedListing
|
||||
);
|
||||
|
||||
|
@ -307,7 +304,7 @@ public class ListConsumerGroupTest {
|
|||
assertGroupListing(
|
||||
service,
|
||||
Set.of(GroupType.CONSUMER),
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
expectedListing
|
||||
);
|
||||
|
||||
|
@ -329,7 +326,7 @@ public class ListConsumerGroupTest {
|
|||
assertGroupListing(
|
||||
service,
|
||||
Set.of(GroupType.CLASSIC),
|
||||
Collections.emptySet(),
|
||||
Set.of(),
|
||||
expectedListing
|
||||
);
|
||||
}
|
||||
|
@ -343,78 +340,78 @@ public class ListConsumerGroupTest {
|
|||
String topicPartitionsGroup = TOPIC_PARTITIONS_GROUP_PREFIX + "0";
|
||||
createTopic(topic);
|
||||
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Collections.singleton(new TopicPartition(topic, 0)));
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Set.of(new TopicPartition(topic, 0)));
|
||||
AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, protocolGroup, topic)
|
||||
) {
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list"),
|
||||
Collections.emptyList(),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list"),
|
||||
List.of(),
|
||||
Set.of(
|
||||
Collections.singletonList(protocolGroup),
|
||||
Collections.singletonList(topicPartitionsGroup)
|
||||
List.of(protocolGroup),
|
||||
List.of(topicPartitionsGroup)
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Stable"),
|
||||
Arrays.asList(topicPartitionsGroup, "Empty")
|
||||
List.of(protocolGroup, "Stable"),
|
||||
List.of(topicPartitionsGroup, "Empty")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type"),
|
||||
Arrays.asList("GROUP", "TYPE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type"),
|
||||
List.of("GROUP", "TYPE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Classic"),
|
||||
Arrays.asList(topicPartitionsGroup, "Classic")
|
||||
List.of(protocolGroup, "Classic"),
|
||||
List.of(topicPartitionsGroup, "Classic")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "--state"),
|
||||
Arrays.asList("GROUP", "TYPE", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "--state"),
|
||||
List.of("GROUP", "TYPE", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Classic", "Stable"),
|
||||
Arrays.asList(topicPartitionsGroup, "Classic", "Empty")
|
||||
List.of(protocolGroup, "Classic", "Stable"),
|
||||
List.of(topicPartitionsGroup, "Classic", "Empty")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state", "Stable"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state", "Stable"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Stable")
|
||||
List.of(protocolGroup, "Stable")
|
||||
)
|
||||
);
|
||||
|
||||
// Check case-insensitivity in state filter.
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state", "stable"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state", "stable"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Stable")
|
||||
List.of(protocolGroup, "Stable")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "Classic"),
|
||||
Arrays.asList("GROUP", "TYPE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "Classic"),
|
||||
List.of("GROUP", "TYPE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Classic"),
|
||||
Arrays.asList(topicPartitionsGroup, "Classic")
|
||||
List.of(protocolGroup, "Classic"),
|
||||
List.of(topicPartitionsGroup, "Classic")
|
||||
)
|
||||
);
|
||||
|
||||
// Check case-insensitivity in type filter.
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "classic"),
|
||||
Arrays.asList("GROUP", "TYPE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "classic"),
|
||||
List.of("GROUP", "TYPE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Classic"),
|
||||
Arrays.asList(topicPartitionsGroup, "Classic")
|
||||
List.of(protocolGroup, "Classic"),
|
||||
List.of(topicPartitionsGroup, "Classic")
|
||||
)
|
||||
);
|
||||
}
|
||||
|
@ -428,58 +425,58 @@ public class ListConsumerGroupTest {
|
|||
String topicPartitionsGroup = TOPIC_PARTITIONS_GROUP_PREFIX + "0";
|
||||
createTopic(topic);
|
||||
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Collections.singleton(new TopicPartition(topic, 0)));
|
||||
try (AutoCloseable topicPartitionsConsumerGroupExecutor = consumerGroupClosable(topicPartitionsGroup, Set.of(new TopicPartition(topic, 0)));
|
||||
AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(groupProtocol, protocolGroup, topic)
|
||||
) {
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list"),
|
||||
Collections.emptyList(),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list"),
|
||||
List.of(),
|
||||
Set.of(
|
||||
Collections.singletonList(protocolGroup),
|
||||
Collections.singletonList(topicPartitionsGroup)
|
||||
List.of(protocolGroup),
|
||||
List.of(topicPartitionsGroup)
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--state"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Stable"),
|
||||
Arrays.asList(topicPartitionsGroup, "Empty")
|
||||
List.of(protocolGroup, "Stable"),
|
||||
List.of(topicPartitionsGroup, "Empty")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type"),
|
||||
Arrays.asList("GROUP", "TYPE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type"),
|
||||
List.of("GROUP", "TYPE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Consumer"),
|
||||
Arrays.asList(topicPartitionsGroup, "Classic")
|
||||
List.of(protocolGroup, "Consumer"),
|
||||
List.of(topicPartitionsGroup, "Classic")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "--state"),
|
||||
Arrays.asList("GROUP", "TYPE", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "--state"),
|
||||
List.of("GROUP", "TYPE", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Consumer", "Stable"),
|
||||
Arrays.asList(topicPartitionsGroup, "Classic", "Empty")
|
||||
List.of(protocolGroup, "Consumer", "Stable"),
|
||||
List.of(topicPartitionsGroup, "Classic", "Empty")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "consumer"),
|
||||
Arrays.asList("GROUP", "TYPE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "consumer"),
|
||||
List.of("GROUP", "TYPE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Consumer")
|
||||
List.of(protocolGroup, "Consumer")
|
||||
)
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "consumer", "--state", "Stable"),
|
||||
Arrays.asList("GROUP", "TYPE", "STATE"),
|
||||
List.of("--bootstrap-server", clusterInstance.bootstrapServers(), "--list", "--type", "consumer", "--state", "Stable"),
|
||||
List.of("GROUP", "TYPE", "STATE"),
|
||||
Set.of(
|
||||
Arrays.asList(protocolGroup, "Consumer", "Stable")
|
||||
List.of(protocolGroup, "Consumer", "Stable")
|
||||
)
|
||||
);
|
||||
}
|
||||
|
@ -489,7 +486,7 @@ public class ListConsumerGroupTest {
|
|||
Map<String, Object> configs = composeConfigs(
|
||||
groupId,
|
||||
protocol.name,
|
||||
emptyMap()
|
||||
Map.of()
|
||||
);
|
||||
|
||||
return ConsumerGroupCommandTestUtils.buildConsumers(
|
||||
|
@ -504,7 +501,7 @@ public class ListConsumerGroupTest {
|
|||
Map<String, Object> configs = composeConfigs(
|
||||
groupId,
|
||||
GroupProtocol.CLASSIC.name,
|
||||
emptyMap()
|
||||
Map.of()
|
||||
);
|
||||
|
||||
return ConsumerGroupCommandTestUtils.buildConsumers(
|
||||
|
@ -533,26 +530,26 @@ public class ListConsumerGroupTest {
|
|||
ConsumerGroupCommandOptions opts = ConsumerGroupCommandOptions.fromArgs(args);
|
||||
ConsumerGroupCommand.ConsumerGroupService service = new ConsumerGroupCommand.ConsumerGroupService(
|
||||
opts,
|
||||
Collections.singletonMap(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
);
|
||||
|
||||
return service;
|
||||
}
|
||||
|
||||
private void createTopic(String topic) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.createTopics(Collections.singletonList(new NewTopic(topic, 1, (short) 1))).topicId(topic).get());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.createTopics(List.of(new NewTopic(topic, 1, (short) 1))).topicId(topic).get());
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteTopic(String topic) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(Collections.singleton(topic)).all().get());
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteTopics(Set.of(topic)).all().get());
|
||||
}
|
||||
}
|
||||
|
||||
private void removeConsumer(Set<String> groupIds) {
|
||||
try (Admin admin = Admin.create(Collections.singletonMap(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
Assertions.assertDoesNotThrow(() -> admin.deleteConsumerGroups(groupIds).all().get());
|
||||
}
|
||||
}
|
||||
|
@ -603,7 +600,7 @@ public class ListConsumerGroupTest {
|
|||
// Parse the header if one is expected.
|
||||
if (!expectedHeader.isEmpty()) {
|
||||
if (lines.length == 0) return false;
|
||||
List<String> header = Arrays.stream(lines[index++].split("\\s+")).collect(Collectors.toList());
|
||||
List<String> header = Arrays.stream(lines[index++].split("\\s+")).toList();
|
||||
if (!expectedHeader.equals(header)) {
|
||||
return false;
|
||||
}
|
||||
|
@ -612,7 +609,7 @@ public class ListConsumerGroupTest {
|
|||
// Parse the groups.
|
||||
Set<List<String>> groups = new HashSet<>();
|
||||
for (; index < lines.length; index++) {
|
||||
groups.add(Arrays.stream(lines[index].split("\\s+")).collect(Collectors.toList()));
|
||||
groups.add(Arrays.stream(lines[index].split("\\s+")).toList());
|
||||
}
|
||||
return expectedRows.equals(groups);
|
||||
}, () -> String.format("Expected header=%s and groups=%s, but found:%n%s", expectedHeader, expectedRows, out.get()));
|
||||
|
@ -627,22 +624,22 @@ class ListConsumerGroupUnitTest {
|
|||
@Test
|
||||
public void testConsumerGroupStatesFromString() {
|
||||
Set<GroupState> result = ConsumerGroupCommand.groupStatesFromString("Stable");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Collections.singleton(GroupState.STABLE)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Set.of(GroupState.STABLE)), result);
|
||||
|
||||
result = ConsumerGroupCommand.groupStatesFromString("Stable, PreparingRebalance");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Arrays.asList(GroupState.STABLE, GroupState.PREPARING_REBALANCE)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupState.STABLE, GroupState.PREPARING_REBALANCE)), result);
|
||||
|
||||
result = ConsumerGroupCommand.groupStatesFromString("Dead,CompletingRebalance,");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Arrays.asList(GroupState.DEAD, GroupState.COMPLETING_REBALANCE)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupState.DEAD, GroupState.COMPLETING_REBALANCE)), result);
|
||||
|
||||
result = ConsumerGroupCommand.groupStatesFromString("stable");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Collections.singletonList(GroupState.STABLE)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupState.STABLE)), result);
|
||||
|
||||
result = ConsumerGroupCommand.groupStatesFromString("stable, assigning");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Arrays.asList(GroupState.STABLE, GroupState.ASSIGNING)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupState.STABLE, GroupState.ASSIGNING)), result);
|
||||
|
||||
result = ConsumerGroupCommand.groupStatesFromString("dead,reconciling,");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Arrays.asList(GroupState.DEAD, GroupState.RECONCILING)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupState.DEAD, GroupState.RECONCILING)), result);
|
||||
|
||||
Assertions.assertThrows(IllegalArgumentException.class, () -> ConsumerGroupCommand.groupStatesFromString("bad, wrong"));
|
||||
|
||||
|
@ -654,13 +651,13 @@ class ListConsumerGroupUnitTest {
|
|||
@Test
|
||||
public void testConsumerGroupTypesFromString() {
|
||||
Set<GroupType> result = ConsumerGroupCommand.consumerGroupTypesFromString("consumer");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Collections.singleton(GroupType.CONSUMER)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Set.of(GroupType.CONSUMER)), result);
|
||||
|
||||
result = ConsumerGroupCommand.consumerGroupTypesFromString("consumer, classic");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Arrays.asList(GroupType.CONSUMER, GroupType.CLASSIC)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupType.CONSUMER, GroupType.CLASSIC)), result);
|
||||
|
||||
result = ConsumerGroupCommand.consumerGroupTypesFromString("Consumer, Classic");
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(Arrays.asList(GroupType.CONSUMER, GroupType.CLASSIC)), result);
|
||||
Assertions.assertEquals(ListConsumerGroupTest.set(List.of(GroupType.CONSUMER, GroupType.CLASSIC)), result);
|
||||
|
||||
Assertions.assertThrows(IllegalArgumentException.class, () -> ConsumerGroupCommand.consumerGroupTypesFromString("Share"));
|
||||
|
||||
|
|
|
@ -56,16 +56,11 @@ import java.util.Set;
|
|||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import joptsimple.OptionException;
|
||||
|
||||
import static java.time.LocalDateTime.now;
|
||||
import static java.util.Arrays.asList;
|
||||
import static java.util.Collections.singleton;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
import static org.apache.kafka.clients.CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG;
|
||||
import static org.apache.kafka.clients.admin.AdminClientConfig.RETRIES_CONFIG;
|
||||
|
@ -121,23 +116,23 @@ public class ResetConsumerGroupOffsetTest {
|
|||
}
|
||||
|
||||
private String[] buildArgsForGroups(ClusterInstance cluster, List<String> groups, String... args) {
|
||||
List<String> res = new ArrayList<>(asList(basicArgs(cluster)));
|
||||
List<String> res = new ArrayList<>(List.of(basicArgs(cluster)));
|
||||
for (String group : groups) {
|
||||
res.add("--group");
|
||||
res.add(group);
|
||||
}
|
||||
res.addAll(asList(args));
|
||||
res.addAll(List.of(args));
|
||||
return res.toArray(new String[0]);
|
||||
}
|
||||
|
||||
private String[] buildArgsForGroup(ClusterInstance cluster, String group, String... args) {
|
||||
return buildArgsForGroups(cluster, singletonList(group), args);
|
||||
return buildArgsForGroups(cluster, List.of(group), args);
|
||||
}
|
||||
|
||||
private String[] buildArgsForAllGroups(ClusterInstance cluster, String... args) {
|
||||
List<String> res = new ArrayList<>(asList(basicArgs(cluster)));
|
||||
List<String> res = new ArrayList<>(List.of(basicArgs(cluster)));
|
||||
res.add("--all-groups");
|
||||
res.addAll(asList(args));
|
||||
res.addAll(List.of(args));
|
||||
return res.toArray(new String[0]);
|
||||
}
|
||||
|
||||
|
@ -183,11 +178,11 @@ public class ResetConsumerGroupOffsetTest {
|
|||
String[] args = buildArgsForGroup(cluster, group, "--topic", topic, "--to-offset", "50");
|
||||
|
||||
produceMessages(cluster, topic, 100);
|
||||
resetAndAssertOffsets(cluster, args, 50, true, singletonList(topic));
|
||||
resetAndAssertOffsets(cluster, args, 50, true, List.of(topic));
|
||||
resetAndAssertOffsets(cluster, addTo(args, "--dry-run"),
|
||||
50, true, singletonList(topic));
|
||||
50, true, List.of(topic));
|
||||
resetAndAssertOffsets(cluster, addTo(args, "--execute"),
|
||||
50, false, singletonList(topic));
|
||||
50, false, List.of(topic));
|
||||
}
|
||||
|
||||
@ClusterTest
|
||||
|
@ -205,11 +200,11 @@ public class ResetConsumerGroupOffsetTest {
|
|||
}
|
||||
|
||||
String[] args = buildArgsForGroups(cluster, groups, "--topic", topic, "--to-offset", "50");
|
||||
resetAndAssertOffsets(cluster, args, 50, true, singletonList(topic));
|
||||
resetAndAssertOffsets(cluster, args, 50, true, List.of(topic));
|
||||
resetAndAssertOffsets(cluster, addTo(args, "--dry-run"),
|
||||
50, true, singletonList(topic));
|
||||
50, true, List.of(topic));
|
||||
resetAndAssertOffsets(cluster, addTo(args, "--execute"),
|
||||
50, false, singletonList(topic));
|
||||
50, false, List.of(topic));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -227,11 +222,11 @@ public class ResetConsumerGroupOffsetTest {
|
|||
awaitConsumerProgress(cluster, topic, group, 100L);
|
||||
}
|
||||
}
|
||||
resetAndAssertOffsets(cluster, args, 50, true, singletonList(topic));
|
||||
resetAndAssertOffsets(cluster, args, 50, true, List.of(topic));
|
||||
resetAndAssertOffsets(cluster, addTo(args, "--dry-run"),
|
||||
50, true, singletonList(topic));
|
||||
50, true, List.of(topic));
|
||||
resetAndAssertOffsets(cluster, addTo(args, "--execute"),
|
||||
50, false, singletonList(topic));
|
||||
50, false, List.of(topic));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -346,9 +341,9 @@ public class ResetConsumerGroupOffsetTest {
|
|||
String[] args = buildArgsForGroup(cluster, group, "--topic", topic, "--by-duration", "PT1M", "--execute");
|
||||
|
||||
try (Admin admin = cluster.admin()) {
|
||||
admin.createTopics(singleton(new NewTopic(topic, 1, (short) 1))).all().get();
|
||||
resetAndAssertOffsets(cluster, args, 0, false, singletonList(topic));
|
||||
admin.deleteTopics(singleton(topic)).all().get();
|
||||
admin.createTopics(Set.of(new NewTopic(topic, 1, (short) 1))).all().get();
|
||||
resetAndAssertOffsets(cluster, args, 0, false, List.of(topic));
|
||||
admin.deleteTopics(Set.of(topic)).all().get();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -476,7 +471,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
|
||||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(args)) {
|
||||
admin.createTopics(singleton(new NewTopic(topic, 2, (short) 1))).all().get();
|
||||
admin.createTopics(Set.of(new NewTopic(topic, 2, (short) 1))).all().get();
|
||||
|
||||
produceConsumeAndShutdown(cluster, topic, group, 2, groupProtocol);
|
||||
Map<TopicPartition, Long> priorCommittedOffsets = committedOffsets(cluster, topic, group);
|
||||
|
@ -487,7 +482,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
expectedOffsets.put(tp1, 0L);
|
||||
resetAndAssertOffsetsCommitted(cluster, service, expectedOffsets, topic);
|
||||
|
||||
admin.deleteTopics(singleton(topic)).all().get();
|
||||
admin.deleteTopics(Set.of(topic)).all().get();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -505,7 +500,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
|
||||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(args)) {
|
||||
admin.createTopics(asList(new NewTopic(topic1, 1, (short) 1),
|
||||
admin.createTopics(List.of(new NewTopic(topic1, 1, (short) 1),
|
||||
new NewTopic(topic2, 1, (short) 1))).all().get();
|
||||
|
||||
produceConsumeAndShutdown(cluster, topic1, group, 1, groupProtocol);
|
||||
|
@ -519,10 +514,10 @@ public class ResetConsumerGroupOffsetTest {
|
|||
expMap.put(tp1, 0L);
|
||||
expMap.put(tp2, 0L);
|
||||
assertEquals(expMap, allResetOffsets);
|
||||
assertEquals(singletonMap(tp1, 0L), committedOffsets(cluster, topic1, group));
|
||||
assertEquals(singletonMap(tp2, 0L), committedOffsets(cluster, topic2, group));
|
||||
assertEquals(Map.of(tp1, 0L), committedOffsets(cluster, topic1, group));
|
||||
assertEquals(Map.of(tp2, 0L), committedOffsets(cluster, topic2, group));
|
||||
|
||||
admin.deleteTopics(asList(topic1, topic2)).all().get();
|
||||
admin.deleteTopics(List.of(topic1, topic2)).all().get();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -540,7 +535,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
|
||||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(args)) {
|
||||
admin.createTopics(asList(new NewTopic(topic1, 2, (short) 1),
|
||||
admin.createTopics(List.of(new NewTopic(topic1, 2, (short) 1),
|
||||
new NewTopic(topic2, 2, (short) 1))).all().get();
|
||||
|
||||
produceConsumeAndShutdown(cluster, topic1, group, 2, groupProtocol);
|
||||
|
@ -563,7 +558,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
priorCommittedOffsets2.put(tp2, 0L);
|
||||
assertEquals(priorCommittedOffsets2, committedOffsets(cluster, topic2, group));
|
||||
|
||||
admin.deleteTopics(asList(topic1, topic2)).all().get();
|
||||
admin.deleteTopics(List.of(topic1, topic2)).all().get();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -584,7 +579,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)) {
|
||||
|
||||
admin.createTopics(singleton(new NewTopic(topic, 2, (short) 1))).all().get();
|
||||
admin.createTopics(Set.of(new NewTopic(topic, 2, (short) 1))).all().get();
|
||||
produceConsumeAndShutdown(cluster, topic, group, 2, groupProtocol);
|
||||
|
||||
Map<String, Map<TopicPartition, OffsetAndMetadata>> exportedOffsets = service.resetOffsets();
|
||||
|
@ -603,7 +598,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
assertEquals(exp1, toOffsetMap(importedOffsets.get(group)));
|
||||
}
|
||||
|
||||
admin.deleteTopics(singleton(topic));
|
||||
admin.deleteTopics(Set.of(topic));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -622,14 +617,14 @@ public class ResetConsumerGroupOffsetTest {
|
|||
TopicPartition t1p1 = new TopicPartition(topic1, 1);
|
||||
TopicPartition t2p0 = new TopicPartition(topic2, 0);
|
||||
TopicPartition t2p1 = new TopicPartition(topic2, 1);
|
||||
String[] cgcArgs = buildArgsForGroups(cluster, asList(group1, group2),
|
||||
String[] cgcArgs = buildArgsForGroups(cluster, List.of(group1, group2),
|
||||
"--all-topics", "--to-offset", "2", "--export");
|
||||
File file = TestUtils.tempFile("reset", ".csv");
|
||||
|
||||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(cgcArgs)) {
|
||||
|
||||
admin.createTopics(asList(new NewTopic(topic1, 2, (short) 1),
|
||||
admin.createTopics(List.of(new NewTopic(topic1, 2, (short) 1),
|
||||
new NewTopic(topic2, 2, (short) 1))).all().get();
|
||||
|
||||
produceConsumeAndShutdown(cluster, topic1, group1, 1, groupProtocol);
|
||||
|
@ -653,7 +648,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
assertEquals(exp2, toOffsetMap(exportedOffsets.get(group2)));
|
||||
|
||||
// Multiple --group's offset import
|
||||
String[] cgcArgsExec = buildArgsForGroups(cluster, asList(group1, group2),
|
||||
String[] cgcArgsExec = buildArgsForGroups(cluster, List.of(group1, group2),
|
||||
"--all-topics",
|
||||
"--from-file", file.getCanonicalPath(), "--dry-run");
|
||||
try (ConsumerGroupCommand.ConsumerGroupService serviceExec = getConsumerGroupService(cgcArgsExec)) {
|
||||
|
@ -670,7 +665,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
assertEquals(exp1, toOffsetMap(importedOffsets2.get(group1)));
|
||||
}
|
||||
|
||||
admin.deleteTopics(asList(topic1, topic2));
|
||||
admin.deleteTopics(List.of(topic1, topic2));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -695,7 +690,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(args)) {
|
||||
|
||||
admin.createTopics(singleton(new NewTopic(topic, 3, (short) 1))).all().get();
|
||||
admin.createTopics(Set.of(new NewTopic(topic, 3, (short) 1))).all().get();
|
||||
produceConsumeAndShutdown(cluster, topic, group, 2, GroupProtocol.CLASSIC);
|
||||
assertDoesNotThrow(() -> resetOffsets(service));
|
||||
// shutdown a broker to make some partitions missing leader
|
||||
|
@ -714,7 +709,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
try (Admin admin = cluster.admin();
|
||||
ConsumerGroupCommand.ConsumerGroupService service = getConsumerGroupService(args)) {
|
||||
|
||||
admin.createTopics(singleton(new NewTopic(topic, 1, (short) 1))).all().get();
|
||||
admin.createTopics(Set.of(new NewTopic(topic, 1, (short) 1))).all().get();
|
||||
produceConsumeAndShutdown(cluster, topic, group, 2, GroupProtocol.CLASSIC);
|
||||
assertThrows(UnknownTopicOrPartitionException.class, () -> resetOffsets(service));
|
||||
}
|
||||
|
@ -731,7 +726,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
private Map<TopicPartition, Long> committedOffsets(ClusterInstance cluster,
|
||||
String topic,
|
||||
String group) {
|
||||
try (Admin admin = Admin.create(singletonMap(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()))) {
|
||||
return admin.listConsumerGroupOffsets(group)
|
||||
.all().get()
|
||||
.get(group).entrySet()
|
||||
|
@ -746,13 +741,13 @@ public class ResetConsumerGroupOffsetTest {
|
|||
private ConsumerGroupCommand.ConsumerGroupService getConsumerGroupService(String[] args) {
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(
|
||||
ConsumerGroupCommandOptions.fromArgs(args),
|
||||
singletonMap(RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE)));
|
||||
Map.of(RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE)));
|
||||
}
|
||||
|
||||
private void produceMessages(ClusterInstance cluster, String topic, int numMessages) {
|
||||
List<ProducerRecord<byte[], byte[]>> records = IntStream.range(0, numMessages)
|
||||
.mapToObj(i -> new ProducerRecord<byte[], byte[]>(topic, new byte[100 * 1000]))
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
produceMessages(cluster, records);
|
||||
}
|
||||
|
||||
|
@ -775,7 +770,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
String topic,
|
||||
String[] args,
|
||||
long expectedOffset) {
|
||||
resetAndAssertOffsets(cluster, args, expectedOffset, false, singletonList(topic));
|
||||
resetAndAssertOffsets(cluster, args, expectedOffset, false, List.of(topic));
|
||||
}
|
||||
|
||||
private void resetAndAssertOffsets(ClusterInstance cluster,
|
||||
|
@ -802,7 +797,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
long expectedOffset) {
|
||||
return topics.stream()
|
||||
.collect(toMap(Function.identity(),
|
||||
topic -> singletonMap(new TopicPartition(topic, 0),
|
||||
topic -> Map.of(new TopicPartition(topic, 0),
|
||||
expectedOffset)));
|
||||
}
|
||||
|
||||
|
@ -822,7 +817,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
private static List<String> generateIds(String name) {
|
||||
return IntStream.rangeClosed(1, 2)
|
||||
.mapToObj(id -> name + id)
|
||||
.collect(Collectors.toList());
|
||||
.toList();
|
||||
}
|
||||
|
||||
private void produceConsumeAndShutdown(ClusterInstance cluster,
|
||||
|
@ -877,7 +872,7 @@ public class ResetConsumerGroupOffsetTest {
|
|||
String topic,
|
||||
String group,
|
||||
long count) throws Exception {
|
||||
try (Admin admin = Admin.create(singletonMap(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()))) {
|
||||
try (Admin admin = Admin.create(Map.of(BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()))) {
|
||||
Supplier<Long> offsets = () -> {
|
||||
try {
|
||||
return admin.listConsumerGroupOffsets(group)
|
||||
|
@ -926,8 +921,8 @@ public class ResetConsumerGroupOffsetTest {
|
|||
}
|
||||
|
||||
private String[] addTo(String[] args, String... extra) {
|
||||
List<String> res = new ArrayList<>(asList(args));
|
||||
res.addAll(asList(extra));
|
||||
List<String> res = new ArrayList<>(List.of(args));
|
||||
res.addAll(List.of(extra));
|
||||
return res.toArray(new String[0]);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -39,9 +39,10 @@ import org.junit.jupiter.params.provider.MethodSource;
|
|||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
@ -60,14 +61,14 @@ public class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest {
|
|||
public static final int NUM_PARTITIONS = 1;
|
||||
public static final int BROKER_COUNT = 1;
|
||||
public static final String KAFKA_CLIENT_SASL_MECHANISM = "SCRAM-SHA-256";
|
||||
private static final Seq<String> KAFKA_SERVER_SASL_MECHANISMS = CollectionConverters.asScala(Collections.singletonList(KAFKA_CLIENT_SASL_MECHANISM)).toSeq();
|
||||
private static final Seq<String> KAFKA_SERVER_SASL_MECHANISMS = CollectionConverters.asScala(List.of(KAFKA_CLIENT_SASL_MECHANISM)).toSeq();
|
||||
|
||||
private Consumer<byte[], byte[]> createConsumer() {
|
||||
return createConsumer(
|
||||
new ByteArrayDeserializer(),
|
||||
new ByteArrayDeserializer(),
|
||||
new Properties(),
|
||||
CollectionConverters.asScala(Collections.<String>emptySet()).toList()
|
||||
CollectionConverters.asScala(Set.<String>of()).toList()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -99,7 +100,7 @@ public class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest {
|
|||
@Override
|
||||
public void addFormatterSettings(Formatter formatter) {
|
||||
formatter.setClusterId("XcZZOzUqS4yHOjhMQB6JLQ");
|
||||
formatter.setScramArguments(Arrays.asList("SCRAM-SHA-256=[name=" + JaasTestUtils.KAFKA_SCRAM_ADMIN +
|
||||
formatter.setScramArguments(List.of("SCRAM-SHA-256=[name=" + JaasTestUtils.KAFKA_SCRAM_ADMIN +
|
||||
",password=" + JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD + "]"));
|
||||
}
|
||||
|
||||
|
@ -119,7 +120,7 @@ public class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest {
|
|||
this.superuserClientConfig().put(SaslConfigs.SASL_JAAS_CONFIG, superuserLoginContext);
|
||||
super.setUp(testInfo);
|
||||
try (Admin admin = createPrivilegedAdminClient()) {
|
||||
admin.createTopics(Collections.singletonList(
|
||||
admin.createTopics(List.of(
|
||||
new NewTopic(TOPIC, NUM_PARTITIONS, (short) BROKER_COUNT))).all().
|
||||
get(5, TimeUnit.MINUTES);
|
||||
} catch (ExecutionException | InterruptedException | TimeoutException e) {
|
||||
|
@ -142,7 +143,7 @@ public class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest {
|
|||
ConsumerGroupCommand.ConsumerGroupService consumerGroupService = prepareConsumerGroupService();
|
||||
Consumer<byte[], byte[]> consumer = createConsumer()
|
||||
) {
|
||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||
consumer.subscribe(List.of(TOPIC));
|
||||
verifyAuthenticationException(consumerGroupService::listGroups);
|
||||
}
|
||||
}
|
||||
|
@ -156,7 +157,7 @@ public class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest {
|
|||
ConsumerGroupCommand.ConsumerGroupService consumerGroupService = prepareConsumerGroupService();
|
||||
Consumer<byte[], byte[]> consumer = createConsumer()
|
||||
) {
|
||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||
consumer.subscribe(List.of(TOPIC));
|
||||
|
||||
TestUtils.waitForCondition(() -> {
|
||||
try {
|
||||
|
@ -180,7 +181,7 @@ public class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest {
|
|||
"--group", "test.group",
|
||||
"--command-config", propsFile.getAbsolutePath()};
|
||||
ConsumerGroupCommandOptions opts = ConsumerGroupCommandOptions.fromArgs(cgcArgs);
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(opts, Collections.emptyMap());
|
||||
return new ConsumerGroupCommand.ConsumerGroupService(opts, Map.of());
|
||||
}
|
||||
|
||||
private void verifyAuthenticationException(Executable action) {
|
||||
|
|
|
@ -127,7 +127,7 @@ public class ShareGroupCommandTest {
|
|||
String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServer, "--list"};
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
ListGroupsResult result = mock(ListGroupsResult.class);
|
||||
when(result.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList(
|
||||
when(result.all()).thenReturn(KafkaFuture.completedFuture(List.of(
|
||||
new GroupListing(firstGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE)),
|
||||
new GroupListing(secondGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.EMPTY))
|
||||
)));
|
||||
|
@ -153,7 +153,7 @@ public class ShareGroupCommandTest {
|
|||
String[] cgcArgs = new String[]{"--bootstrap-server", bootstrapServer, "--list", "--state"};
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
ListGroupsResult resultWithAllStates = mock(ListGroupsResult.class);
|
||||
when(resultWithAllStates.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList(
|
||||
when(resultWithAllStates.all()).thenReturn(KafkaFuture.completedFuture(List.of(
|
||||
new GroupListing(firstGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE)),
|
||||
new GroupListing(secondGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.EMPTY))
|
||||
)));
|
||||
|
@ -571,7 +571,7 @@ public class ShareGroupCommandTest {
|
|||
|
||||
for (List<String> describeType : DESCRIBE_TYPES) {
|
||||
// note the group to be queried is a different (non-existing) group
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", bootstrapServer, "--describe", "--group", missingGroup));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", bootstrapServer, "--describe", "--group", missingGroup));
|
||||
cgcArgs.addAll(describeType);
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
DescribeShareGroupsResult describeShareGroupsResult = mock(DescribeShareGroupsResult.class);
|
||||
|
@ -677,7 +677,7 @@ public class ShareGroupCommandTest {
|
|||
String secondTopic = "t2";
|
||||
String bootstrapServer = "localhost:9092";
|
||||
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
DeleteShareGroupOffsetsResult result = mock(DeleteShareGroupOffsetsResult.class);
|
||||
|
||||
|
@ -715,7 +715,7 @@ public class ShareGroupCommandTest {
|
|||
String secondTopic = "t2";
|
||||
String bootstrapServer = "localhost:9092";
|
||||
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--group", secondGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--group", secondGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
|
||||
try (ShareGroupService service = getShareGroupService(cgcArgs.toArray(new String[0]), adminClient)) {
|
||||
|
@ -734,7 +734,7 @@ public class ShareGroupCommandTest {
|
|||
String secondTopic = "t2";
|
||||
String bootstrapServer = "localhost:9092";
|
||||
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
DeleteShareGroupOffsetsResult result = mock(DeleteShareGroupOffsetsResult.class);
|
||||
|
||||
|
@ -787,7 +787,7 @@ public class ShareGroupCommandTest {
|
|||
String secondTopic = "t2";
|
||||
String bootstrapServer = "localhost:9092";
|
||||
|
||||
List<String> cgcArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
List<String> cgcArgs = new ArrayList<>(List.of("--bootstrap-server", bootstrapServer, "--delete-offsets", "--group", firstGroup, "--topic", firstTopic, "--topic", secondTopic));
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
DeleteShareGroupOffsetsResult result = mock(DeleteShareGroupOffsetsResult.class);
|
||||
|
||||
|
|
|
@ -39,11 +39,11 @@ import org.junit.jupiter.params.provider.MethodSource;
|
|||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.PrintStream;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
|
||||
|
@ -55,7 +55,7 @@ public class ShareGroupStateMessageFormatterTest extends CoordinatorRecordMessag
|
|||
.setLeaderEpoch(20)
|
||||
.setStartOffset(50)
|
||||
.setStateBatches(
|
||||
Arrays.asList(
|
||||
List.of(
|
||||
new PersisterStateBatch(
|
||||
100,
|
||||
200,
|
||||
|
@ -78,7 +78,7 @@ public class ShareGroupStateMessageFormatterTest extends CoordinatorRecordMessag
|
|||
.setLeaderEpoch(25)
|
||||
.setStartOffset(55)
|
||||
.setStateBatches(
|
||||
Arrays.asList(
|
||||
List.of(
|
||||
new PersisterStateBatch(
|
||||
100,
|
||||
150,
|
||||
|
@ -229,7 +229,7 @@ public class ShareGroupStateMessageFormatterTest extends CoordinatorRecordMessag
|
|||
new RecordHeaders(), Optional.empty());
|
||||
|
||||
try (MessageFormatter formatter = new ShareGroupStateMessageFormatter()) {
|
||||
formatter.configure(emptyMap());
|
||||
formatter.configure(Map.of());
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
RuntimeException re = assertThrows(RuntimeException.class, () -> formatter.writeTo(record, new PrintStream(out)));
|
||||
assertEquals(expectedOutput.getMessage(), re.getMessage());
|
||||
|
|
|
@ -59,11 +59,11 @@ import java.nio.file.Files;
|
|||
import java.text.DecimalFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.function.Function;
|
||||
|
@ -106,7 +106,7 @@ public class ReplicationQuotasTestRig {
|
|||
boolean displayChartsOnScreen = args.length > 0 && Objects.equals(args[0], "show-gui");
|
||||
Journal journal = new Journal();
|
||||
|
||||
List<ExperimentDef> experiments = Arrays.asList(
|
||||
List<ExperimentDef> experiments = List.of(
|
||||
//1GB total data written, will take 210s
|
||||
new ExperimentDef("Experiment1", 5, 20, 1 * K, 500, 100 * 1000),
|
||||
//5GB total data written, will take 110s
|
||||
|
@ -209,11 +209,11 @@ public class ReplicationQuotasTestRig {
|
|||
|
||||
Map<Integer, List<Integer>> replicas = IntStream.rangeClosed(0, config.partitions - 1).boxed().collect(Collectors.toMap(
|
||||
Function.identity(),
|
||||
partition -> Collections.singletonList(nextReplicaRoundRobin.getAsInt())
|
||||
partition -> List.of(nextReplicaRoundRobin.getAsInt())
|
||||
));
|
||||
|
||||
startBrokers(config.brokers);
|
||||
adminClient.createTopics(Collections.singleton(new NewTopic(TOPIC_NAME, replicas))).all().get();
|
||||
adminClient.createTopics(Set.of(new NewTopic(TOPIC_NAME, replicas))).all().get();
|
||||
|
||||
TestUtils.waitUntilTrue(
|
||||
() -> cluster.brokers().values().stream().allMatch(server -> {
|
||||
|
@ -248,7 +248,7 @@ public class ReplicationQuotasTestRig {
|
|||
long start = System.currentTimeMillis();
|
||||
|
||||
ReassignPartitionsCommand.executeAssignment(adminClient, false,
|
||||
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Collections.emptyMap()),
|
||||
ReassignPartitionsCommand.formatAsReassignmentJson(newAssignment, Map.of()),
|
||||
config.throttle, -1L, 10000L, Time.SYSTEM, false);
|
||||
|
||||
//Await completion
|
||||
|
@ -282,12 +282,12 @@ public class ReplicationQuotasTestRig {
|
|||
}
|
||||
|
||||
void logOutput(ExperimentDef config, Map<Integer, List<Integer>> replicas, Map<TopicPartition, List<Integer>> newAssignment) throws Exception {
|
||||
List<TopicPartitionInfo> actual = adminClient.describeTopics(Collections.singleton(TOPIC_NAME))
|
||||
List<TopicPartitionInfo> actual = adminClient.describeTopics(Set.of(TOPIC_NAME))
|
||||
.allTopicNames().get().get(TOPIC_NAME).partitions();
|
||||
|
||||
Map<Integer, List<Integer>> curAssignment = actual.stream().collect(Collectors.toMap(
|
||||
TopicPartitionInfo::partition,
|
||||
p -> p.replicas().stream().map(Node::id).collect(Collectors.toList())
|
||||
p -> p.replicas().stream().map(Node::id).toList()
|
||||
));
|
||||
|
||||
//Long stats
|
||||
|
|
|
@ -427,7 +427,7 @@ public class ReassignPartitionsCommandTest {
|
|||
new CompletedMoveState(reassignment.targetDir)
|
||||
), false));
|
||||
|
||||
BrokerDirs info1 = new BrokerDirs(admin.describeLogDirs(IntStream.range(0, 4).boxed().collect(Collectors.toList())), 0);
|
||||
BrokerDirs info1 = new BrokerDirs(admin.describeLogDirs(IntStream.range(0, 4).boxed().toList()), 0);
|
||||
assertEquals(reassignment.targetDir, info1.curLogDirs.getOrDefault(topicPartition, ""));
|
||||
}
|
||||
}
|
||||
|
@ -640,8 +640,7 @@ public class ReassignPartitionsCommandTest {
|
|||
int brokerId,
|
||||
List<Integer> replicas) throws ExecutionException, InterruptedException {
|
||||
try (Admin admin = Admin.create(Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()))) {
|
||||
DescribeLogDirsResult describeLogDirsResult = admin.describeLogDirs(
|
||||
IntStream.range(0, 4).boxed().collect(Collectors.toList()));
|
||||
DescribeLogDirsResult describeLogDirsResult = admin.describeLogDirs(IntStream.range(0, 4).boxed().toList());
|
||||
|
||||
BrokerDirs logDirInfo = new BrokerDirs(describeLogDirsResult, brokerId);
|
||||
assertTrue(logDirInfo.futureLogDirs.isEmpty());
|
||||
|
@ -654,7 +653,7 @@ public class ReassignPartitionsCommandTest {
|
|||
return "\"" + newDir + "\"";
|
||||
else
|
||||
return "\"any\"";
|
||||
}).collect(Collectors.toList());
|
||||
}).toList();
|
||||
|
||||
String reassignmentJson =
|
||||
" { \"version\": 1," +
|
||||
|
|
|
@ -40,6 +40,7 @@ import org.junit.jupiter.api.Timeout;
|
|||
|
||||
import java.util.AbstractMap.SimpleImmutableEntry;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@ -51,7 +52,6 @@ import java.util.concurrent.ExecutionException;
|
|||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static org.apache.kafka.tools.reassign.ReassignPartitionsCommand.alterPartitionReassignments;
|
||||
import static org.apache.kafka.tools.reassign.ReassignPartitionsCommand.alterReplicaLogDirs;
|
||||
import static org.apache.kafka.tools.reassign.ReassignPartitionsCommand.calculateFollowerThrottles;
|
||||
|
@ -120,11 +120,11 @@ public class ReassignPartitionsUnitTest {
|
|||
Map<TopicPartition, PartitionReassignmentState> states = new HashMap<>();
|
||||
|
||||
states.put(new TopicPartition("foo", 0),
|
||||
new PartitionReassignmentState(asList(1, 2, 3), asList(1, 2, 3), true));
|
||||
new PartitionReassignmentState(List.of(1, 2, 3), List.of(1, 2, 3), true));
|
||||
states.put(new TopicPartition("foo", 1),
|
||||
new PartitionReassignmentState(asList(1, 2, 3), asList(1, 2, 4), false));
|
||||
new PartitionReassignmentState(List.of(1, 2, 3), List.of(1, 2, 4), false));
|
||||
states.put(new TopicPartition("bar", 0),
|
||||
new PartitionReassignmentState(asList(1, 2, 3), asList(1, 2, 4), false));
|
||||
new PartitionReassignmentState(List.of(1, 2, 3), List.of(1, 2, 4), false));
|
||||
|
||||
assertEquals(String.join(System.lineSeparator(),
|
||||
"Status of partition reassignment:",
|
||||
|
@ -136,19 +136,19 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
private void addTopics(MockAdminClient adminClient) {
|
||||
List<Node> b = adminClient.brokers();
|
||||
adminClient.addTopic(false, "foo", asList(
|
||||
adminClient.addTopic(false, "foo", List.of(
|
||||
new TopicPartitionInfo(0, b.get(0),
|
||||
asList(b.get(0), b.get(1), b.get(2)),
|
||||
asList(b.get(0), b.get(1))),
|
||||
List.of(b.get(0), b.get(1), b.get(2)),
|
||||
List.of(b.get(0), b.get(1))),
|
||||
new TopicPartitionInfo(1, b.get(1),
|
||||
asList(b.get(1), b.get(2), b.get(3)),
|
||||
asList(b.get(1), b.get(2), b.get(3)))
|
||||
), Collections.emptyMap());
|
||||
adminClient.addTopic(false, "bar", asList(
|
||||
List.of(b.get(1), b.get(2), b.get(3)),
|
||||
List.of(b.get(1), b.get(2), b.get(3)))
|
||||
), Map.of());
|
||||
adminClient.addTopic(false, "bar", List.of(
|
||||
new TopicPartitionInfo(0, b.get(2),
|
||||
asList(b.get(2), b.get(3), b.get(0)),
|
||||
asList(b.get(2), b.get(3), b.get(0)))
|
||||
), Collections.emptyMap());
|
||||
List.of(b.get(2), b.get(3), b.get(0)),
|
||||
List.of(b.get(2), b.get(3), b.get(0)))
|
||||
), Map.of());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -158,8 +158,8 @@ public class ReassignPartitionsUnitTest {
|
|||
// Create a reassignment and test findPartitionReassignmentStates.
|
||||
Map<TopicPartition, List<Integer>> reassignments = new HashMap<>();
|
||||
|
||||
reassignments.put(new TopicPartition("foo", 0), asList(0, 1, 3));
|
||||
reassignments.put(new TopicPartition("quux", 0), asList(1, 2, 3));
|
||||
reassignments.put(new TopicPartition("foo", 0), List.of(0, 1, 3));
|
||||
reassignments.put(new TopicPartition("quux", 0), List.of(1, 2, 3));
|
||||
|
||||
Map<TopicPartition, Throwable> reassignmentResult = alterPartitionReassignments(adminClient, reassignments, false);
|
||||
|
||||
|
@ -169,14 +169,14 @@ public class ReassignPartitionsUnitTest {
|
|||
Map<TopicPartition, PartitionReassignmentState> expStates = new HashMap<>();
|
||||
|
||||
expStates.put(new TopicPartition("foo", 0),
|
||||
new PartitionReassignmentState(asList(0, 1, 2), asList(0, 1, 3), false));
|
||||
new PartitionReassignmentState(List.of(0, 1, 2), List.of(0, 1, 3), false));
|
||||
expStates.put(new TopicPartition("foo", 1),
|
||||
new PartitionReassignmentState(asList(1, 2, 3), asList(1, 2, 3), true));
|
||||
new PartitionReassignmentState(List.of(1, 2, 3), List.of(1, 2, 3), true));
|
||||
|
||||
Entry<Map<TopicPartition, PartitionReassignmentState>, Boolean> actual =
|
||||
findPartitionReassignmentStates(adminClient, asList(
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 0), asList(0, 1, 3)),
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 1), asList(1, 2, 3))
|
||||
findPartitionReassignmentStates(adminClient, List.of(
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 0), List.of(0, 1, 3)),
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 1), List.of(1, 2, 3))
|
||||
));
|
||||
|
||||
assertEquals(expStates, actual.getKey());
|
||||
|
@ -192,13 +192,13 @@ public class ReassignPartitionsUnitTest {
|
|||
expStates.clear();
|
||||
|
||||
expStates.put(new TopicPartition("foo", 0),
|
||||
new PartitionReassignmentState(asList(0, 1, 2), asList(0, 1, 3), true));
|
||||
new PartitionReassignmentState(List.of(0, 1, 2), List.of(0, 1, 3), true));
|
||||
expStates.put(new TopicPartition("foo", 1),
|
||||
new PartitionReassignmentState(asList(1, 2, 3), asList(1, 2, 3), true));
|
||||
new PartitionReassignmentState(List.of(1, 2, 3), List.of(1, 2, 3), true));
|
||||
|
||||
actual = findPartitionReassignmentStates(adminClient, asList(
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 0), asList(0, 1, 3)),
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 1), asList(1, 2, 3))
|
||||
actual = findPartitionReassignmentStates(adminClient, List.of(
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 0), List.of(0, 1, 3)),
|
||||
new SimpleImmutableEntry<>(new TopicPartition("foo", 1), List.of(1, 2, 3))
|
||||
));
|
||||
|
||||
assertEquals(expStates, actual.getKey());
|
||||
|
@ -210,20 +210,20 @@ public class ReassignPartitionsUnitTest {
|
|||
public void testFindLogDirMoveStates() throws Exception {
|
||||
try (MockAdminClient adminClient = new MockAdminClient.Builder().
|
||||
numBrokers(4).
|
||||
brokerLogDirs(asList(
|
||||
asList("/tmp/kafka-logs0", "/tmp/kafka-logs1"),
|
||||
asList("/tmp/kafka-logs0", "/tmp/kafka-logs1"),
|
||||
asList("/tmp/kafka-logs0", "/tmp/kafka-logs1"),
|
||||
asList("/tmp/kafka-logs0", null)))
|
||||
brokerLogDirs(List.of(
|
||||
List.of("/tmp/kafka-logs0", "/tmp/kafka-logs1"),
|
||||
List.of("/tmp/kafka-logs0", "/tmp/kafka-logs1"),
|
||||
List.of("/tmp/kafka-logs0", "/tmp/kafka-logs1"),
|
||||
Arrays.asList("/tmp/kafka-logs0", null)))
|
||||
.build()) {
|
||||
|
||||
addTopics(adminClient);
|
||||
List<Node> b = adminClient.brokers();
|
||||
adminClient.addTopic(false, "quux", asList(
|
||||
adminClient.addTopic(false, "quux", List.of(
|
||||
new TopicPartitionInfo(0, b.get(2),
|
||||
asList(b.get(1), b.get(2), b.get(3)),
|
||||
asList(b.get(1), b.get(2), b.get(3)))),
|
||||
Collections.emptyMap());
|
||||
List.of(b.get(1), b.get(2), b.get(3)),
|
||||
List.of(b.get(1), b.get(2), b.get(3)))),
|
||||
Map.of());
|
||||
|
||||
Map<TopicPartitionReplica, String> replicaAssignment = new HashMap<>();
|
||||
|
||||
|
@ -286,15 +286,15 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
Map<TopicPartition, List<Integer>> assignments = new HashMap<>();
|
||||
|
||||
assignments.put(new TopicPartition("foo", 0), asList(0, 1, 2));
|
||||
assignments.put(new TopicPartition("foo", 1), asList(1, 2, 3));
|
||||
assignments.put(new TopicPartition("foo", 0), List.of(0, 1, 2));
|
||||
assignments.put(new TopicPartition("foo", 1), List.of(1, 2, 3));
|
||||
|
||||
assertEquals(assignments, getReplicaAssignmentForTopics(adminClient, asList("foo")));
|
||||
assertEquals(assignments, getReplicaAssignmentForTopics(adminClient, List.of("foo")));
|
||||
|
||||
assignments.clear();
|
||||
|
||||
assignments.put(new TopicPartition("foo", 0), asList(0, 1, 2));
|
||||
assignments.put(new TopicPartition("bar", 0), asList(2, 3, 0));
|
||||
assignments.put(new TopicPartition("foo", 0), List.of(0, 1, 2));
|
||||
assignments.put(new TopicPartition("bar", 0), List.of(2, 3, 0));
|
||||
|
||||
assertEquals(assignments,
|
||||
getReplicaAssignmentForPartitions(adminClient, Set.of(new TopicPartition("foo", 0), new TopicPartition("bar", 0))));
|
||||
|
@ -311,26 +311,26 @@ public class ReassignPartitionsUnitTest {
|
|||
@Test
|
||||
public void testGetBrokerRackInformation() throws Exception {
|
||||
try (MockAdminClient adminClient = new MockAdminClient.Builder().
|
||||
brokers(asList(new Node(0, "localhost", 9092, "rack0"),
|
||||
brokers(List.of(new Node(0, "localhost", 9092, "rack0"),
|
||||
new Node(1, "localhost", 9093, "rack1"),
|
||||
new Node(2, "localhost", 9094, null))).
|
||||
build()) {
|
||||
|
||||
assertEquals(asList(
|
||||
assertEquals(List.of(
|
||||
new UsableBroker(0, Optional.of("rack0"), false),
|
||||
new UsableBroker(1, Optional.of("rack1"), false)
|
||||
), getBrokerMetadata(adminClient, asList(0, 1), true));
|
||||
assertEquals(asList(
|
||||
), getBrokerMetadata(adminClient, List.of(0, 1), true));
|
||||
assertEquals(List.of(
|
||||
new UsableBroker(0, Optional.empty(), false),
|
||||
new UsableBroker(1, Optional.empty(), false)
|
||||
), getBrokerMetadata(adminClient, asList(0, 1), false));
|
||||
), getBrokerMetadata(adminClient, List.of(0, 1), false));
|
||||
assertStartsWith("Not all brokers have rack information",
|
||||
assertThrows(AdminOperationException.class,
|
||||
() -> getBrokerMetadata(adminClient, asList(1, 2), true)).getMessage());
|
||||
assertEquals(asList(
|
||||
() -> getBrokerMetadata(adminClient, List.of(1, 2), true)).getMessage());
|
||||
assertEquals(List.of(
|
||||
new UsableBroker(1, Optional.empty(), false),
|
||||
new UsableBroker(2, Optional.empty(), false)
|
||||
), getBrokerMetadata(adminClient, asList(1, 2), false));
|
||||
), getBrokerMetadata(adminClient, List.of(1, 2), false));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -344,13 +344,13 @@ public class ReassignPartitionsUnitTest {
|
|||
assertThrows(AdminCommandFailedException.class, () -> parseGenerateAssignmentArgs(
|
||||
"{\"topics\": [{\"topic\": \"foo\"}], \"version\":1}", "5,2,3,4,5"),
|
||||
"Expected to detect duplicate broker list entries").getMessage());
|
||||
assertEquals(new SimpleImmutableEntry<>(asList(5, 2, 3, 4), asList("foo")),
|
||||
assertEquals(new SimpleImmutableEntry<>(List.of(5, 2, 3, 4), List.of("foo")),
|
||||
parseGenerateAssignmentArgs("{\"topics\": [{\"topic\": \"foo\"}], \"version\":1}", "5,2,3,4"));
|
||||
assertStartsWith("List of topics to reassign contains duplicate entries",
|
||||
assertThrows(AdminCommandFailedException.class, () -> parseGenerateAssignmentArgs(
|
||||
"{\"topics\": [{\"topic\": \"foo\"},{\"topic\": \"foo\"}], \"version\":1}", "5,2,3,4"),
|
||||
"Expected to detect duplicate topic entries").getMessage());
|
||||
assertEquals(new SimpleImmutableEntry<>(asList(5, 3, 4), asList("foo", "bar")),
|
||||
assertEquals(new SimpleImmutableEntry<>(List.of(5, 3, 4), List.of("foo", "bar")),
|
||||
parseGenerateAssignmentArgs(
|
||||
"{\"topics\": [{\"topic\": \"foo\"},{\"topic\": \"bar\"}], \"version\":1}", "5,3,4"));
|
||||
}
|
||||
|
@ -380,7 +380,7 @@ public class ReassignPartitionsUnitTest {
|
|||
@Test
|
||||
public void testGenerateAssignmentWithInconsistentRacks() throws Exception {
|
||||
try (MockAdminClient adminClient = new MockAdminClient.Builder().
|
||||
brokers(asList(
|
||||
brokers(List.of(
|
||||
new Node(0, "localhost", 9092, "rack0"),
|
||||
new Node(1, "localhost", 9093, "rack0"),
|
||||
new Node(2, "localhost", 9094, null),
|
||||
|
@ -400,8 +400,8 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
Map<TopicPartition, List<Integer>> expCurrent = new HashMap<>();
|
||||
|
||||
expCurrent.put(new TopicPartition("foo", 0), asList(0, 1, 2));
|
||||
expCurrent.put(new TopicPartition("foo", 1), asList(1, 2, 3));
|
||||
expCurrent.put(new TopicPartition("foo", 0), List.of(0, 1, 2));
|
||||
expCurrent.put(new TopicPartition("foo", 1), List.of(1, 2, 3));
|
||||
|
||||
assertEquals(expCurrent, proposedCurrent.getValue());
|
||||
}
|
||||
|
@ -411,7 +411,7 @@ public class ReassignPartitionsUnitTest {
|
|||
public void testGenerateAssignmentWithFewerBrokers() throws Exception {
|
||||
try (MockAdminClient adminClient = new MockAdminClient.Builder().numBrokers(4).build()) {
|
||||
addTopics(adminClient);
|
||||
List<Integer> goalBrokers = asList(0, 1, 3);
|
||||
List<Integer> goalBrokers = List.of(0, 1, 3);
|
||||
|
||||
Entry<Map<TopicPartition, List<Integer>>, Map<TopicPartition, List<Integer>>>
|
||||
proposedCurrent = generateAssignment(adminClient,
|
||||
|
@ -420,9 +420,9 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
Map<TopicPartition, List<Integer>> expCurrent = new HashMap<>();
|
||||
|
||||
expCurrent.put(new TopicPartition("foo", 0), asList(0, 1, 2));
|
||||
expCurrent.put(new TopicPartition("foo", 1), asList(1, 2, 3));
|
||||
expCurrent.put(new TopicPartition("bar", 0), asList(2, 3, 0));
|
||||
expCurrent.put(new TopicPartition("foo", 0), List.of(0, 1, 2));
|
||||
expCurrent.put(new TopicPartition("foo", 1), List.of(1, 2, 3));
|
||||
expCurrent.put(new TopicPartition("bar", 0), List.of(2, 3, 0));
|
||||
|
||||
assertEquals(expCurrent, proposedCurrent.getValue());
|
||||
|
||||
|
@ -438,15 +438,15 @@ public class ReassignPartitionsUnitTest {
|
|||
public void testCurrentPartitionReplicaAssignmentToString() throws Exception {
|
||||
Map<TopicPartition, List<Integer>> proposedParts = new HashMap<>();
|
||||
|
||||
proposedParts.put(new TopicPartition("foo", 1), asList(1, 2, 3));
|
||||
proposedParts.put(new TopicPartition("bar", 0), asList(7, 8, 9));
|
||||
proposedParts.put(new TopicPartition("foo", 1), List.of(1, 2, 3));
|
||||
proposedParts.put(new TopicPartition("bar", 0), List.of(7, 8, 9));
|
||||
|
||||
Map<TopicPartition, List<Integer>> currentParts = new HashMap<>();
|
||||
|
||||
currentParts.put(new TopicPartition("foo", 0), asList(1, 2, 3));
|
||||
currentParts.put(new TopicPartition("foo", 1), asList(4, 5, 6));
|
||||
currentParts.put(new TopicPartition("bar", 0), asList(7, 8));
|
||||
currentParts.put(new TopicPartition("baz", 0), asList(10, 11, 12));
|
||||
currentParts.put(new TopicPartition("foo", 0), List.of(1, 2, 3));
|
||||
currentParts.put(new TopicPartition("foo", 1), List.of(4, 5, 6));
|
||||
currentParts.put(new TopicPartition("bar", 0), List.of(7, 8));
|
||||
currentParts.put(new TopicPartition("baz", 0), List.of(10, 11, 12));
|
||||
|
||||
assertEquals(String.join(System.lineSeparator(),
|
||||
"Current partition replica assignment",
|
||||
|
@ -473,37 +473,37 @@ public class ReassignPartitionsUnitTest {
|
|||
Map<TopicPartition, PartitionReassignment> currentReassignments = new HashMap<>();
|
||||
|
||||
currentReassignments.put(new TopicPartition("foo", 0), new PartitionReassignment(
|
||||
asList(1, 2, 3, 4), asList(4), asList(3)));
|
||||
List.of(1, 2, 3, 4), List.of(4), List.of(3)));
|
||||
currentReassignments.put(new TopicPartition("foo", 1), new PartitionReassignment(
|
||||
asList(4, 5, 6, 7, 8), asList(7, 8), asList(4, 5)));
|
||||
List.of(4, 5, 6, 7, 8), List.of(7, 8), List.of(4, 5)));
|
||||
currentReassignments.put(new TopicPartition("foo", 2), new PartitionReassignment(
|
||||
asList(1, 2, 3, 4), asList(3, 4), asList(1, 2)));
|
||||
List.of(1, 2, 3, 4), List.of(3, 4), List.of(1, 2)));
|
||||
currentReassignments.put(new TopicPartition("foo", 3), new PartitionReassignment(
|
||||
asList(1, 2, 3, 4), asList(3, 4), asList(1, 2)));
|
||||
List.of(1, 2, 3, 4), List.of(3, 4), List.of(1, 2)));
|
||||
currentReassignments.put(new TopicPartition("foo", 4), new PartitionReassignment(
|
||||
asList(1, 2, 3, 4), asList(3, 4), asList(1, 2)));
|
||||
List.of(1, 2, 3, 4), List.of(3, 4), List.of(1, 2)));
|
||||
currentReassignments.put(new TopicPartition("foo", 5), new PartitionReassignment(
|
||||
asList(1, 2, 3, 4), asList(3, 4), asList(1, 2)));
|
||||
List.of(1, 2, 3, 4), List.of(3, 4), List.of(1, 2)));
|
||||
|
||||
Map<TopicPartition, List<Integer>> proposedParts = new HashMap<>();
|
||||
|
||||
proposedParts.put(new TopicPartition("foo", 0), asList(1, 2, 5));
|
||||
proposedParts.put(new TopicPartition("foo", 2), asList(3, 4));
|
||||
proposedParts.put(new TopicPartition("foo", 3), asList(5, 6));
|
||||
proposedParts.put(new TopicPartition("foo", 4), asList(3));
|
||||
proposedParts.put(new TopicPartition("foo", 5), asList(3, 4, 5, 6));
|
||||
proposedParts.put(new TopicPartition("bar", 0), asList(1, 2, 3));
|
||||
proposedParts.put(new TopicPartition("foo", 0), List.of(1, 2, 5));
|
||||
proposedParts.put(new TopicPartition("foo", 2), List.of(3, 4));
|
||||
proposedParts.put(new TopicPartition("foo", 3), List.of(5, 6));
|
||||
proposedParts.put(new TopicPartition("foo", 4), List.of(3));
|
||||
proposedParts.put(new TopicPartition("foo", 5), List.of(3, 4, 5, 6));
|
||||
proposedParts.put(new TopicPartition("bar", 0), List.of(1, 2, 3));
|
||||
|
||||
Map<TopicPartition, List<Integer>> currentParts = new HashMap<>();
|
||||
|
||||
currentParts.put(new TopicPartition("foo", 0), asList(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 1), asList(4, 5, 6, 7, 8));
|
||||
currentParts.put(new TopicPartition("foo", 2), asList(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 3), asList(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 4), asList(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 5), asList(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("bar", 0), asList(2, 3, 4));
|
||||
currentParts.put(new TopicPartition("baz", 0), asList(1, 2, 3));
|
||||
currentParts.put(new TopicPartition("foo", 0), List.of(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 1), List.of(4, 5, 6, 7, 8));
|
||||
currentParts.put(new TopicPartition("foo", 2), List.of(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 3), List.of(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 4), List.of(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("foo", 5), List.of(1, 2, 3, 4));
|
||||
currentParts.put(new TopicPartition("bar", 0), List.of(2, 3, 4));
|
||||
currentParts.put(new TopicPartition("baz", 0), List.of(1, 2, 3));
|
||||
|
||||
Map<String, Map<Integer, PartitionMove>> moveMap = calculateProposedMoveMap(currentReassignments, proposedParts, currentParts);
|
||||
|
||||
|
@ -570,8 +570,8 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
Map<TopicPartition, List<Integer>> partitionsToBeReassigned = new HashMap<>();
|
||||
|
||||
partitionsToBeReassigned.put(new TopicPartition("foo", 0), asList(1, 2, 3));
|
||||
partitionsToBeReassigned.put(new TopicPartition("foo", 1), asList(3, 4, 5));
|
||||
partitionsToBeReassigned.put(new TopicPartition("foo", 0), List.of(1, 2, 3));
|
||||
partitionsToBeReassigned.put(new TopicPartition("foo", 1), List.of(3, 4, 5));
|
||||
|
||||
Entry<Map<TopicPartition, List<Integer>>, Map<TopicPartitionReplica, String>> actual = parseExecuteAssignmentArgs(
|
||||
"{\"version\":1,\"partitions\":" +
|
||||
|
@ -593,7 +593,7 @@ public class ReassignPartitionsUnitTest {
|
|||
"[{\"topic\":\"foo\",\"partition\":0,\"replicas\":[1,2,3],\"log_dirs\":[\"/tmp/a\",\"/tmp/b\",\"/tmp/c\"]}" +
|
||||
"]}");
|
||||
|
||||
assertEquals(Collections.singletonMap(new TopicPartition("foo", 0), asList(1, 2, 3)), actual.getKey());
|
||||
assertEquals(Map.of(new TopicPartition("foo", 0), List.of(1, 2, 3)), actual.getKey());
|
||||
assertEquals(replicaAssignment, actual.getValue());
|
||||
}
|
||||
|
||||
|
@ -666,9 +666,9 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
Map<TopicPartition, List<Integer>> reassignments = new HashMap<>();
|
||||
|
||||
reassignments.put(new TopicPartition("foo", 1), asList(4, 5, 3));
|
||||
reassignments.put(new TopicPartition("foo", 0), asList(0, 1, 4, 2));
|
||||
reassignments.put(new TopicPartition("bar", 0), asList(2, 3));
|
||||
reassignments.put(new TopicPartition("foo", 1), List.of(4, 5, 3));
|
||||
reassignments.put(new TopicPartition("foo", 0), List.of(0, 1, 4, 2));
|
||||
reassignments.put(new TopicPartition("bar", 0), List.of(2, 3));
|
||||
|
||||
Map<TopicPartition, Throwable> reassignmentResult = alterPartitionReassignments(adminClient, reassignments, false);
|
||||
|
||||
|
@ -711,9 +711,9 @@ public class ReassignPartitionsUnitTest {
|
|||
|
||||
modifyTopicThrottles(adminClient,
|
||||
leaderThrottles,
|
||||
Collections.singletonMap("bar", "followerBar"));
|
||||
Map.of("bar", "followerBar"));
|
||||
List<ConfigResource> topics = Stream.of("bar", "foo").map(
|
||||
id -> new ConfigResource(ConfigResource.Type.TOPIC, id)).collect(Collectors.toList());
|
||||
id -> new ConfigResource(ConfigResource.Type.TOPIC, id)).toList();
|
||||
Map<ConfigResource, Config> results = adminClient.describeConfigs(topics).all().get();
|
||||
verifyTopicThrottleResults(results.get(topics.get(0)), "leaderBar", "followerBar");
|
||||
verifyTopicThrottleResults(results.get(topics.get(1)), "leaderFoo", "");
|
||||
|
@ -736,7 +736,7 @@ public class ReassignPartitionsUnitTest {
|
|||
try (MockAdminClient adminClient = new MockAdminClient.Builder().
|
||||
numBrokers(4).
|
||||
brokerLogDirs(Collections.nCopies(4,
|
||||
asList("/tmp/kafka-logs0", "/tmp/kafka-logs1"))).
|
||||
List.of("/tmp/kafka-logs0", "/tmp/kafka-logs1"))).
|
||||
build()) {
|
||||
|
||||
addTopics(adminClient);
|
||||
|
|
|
@ -50,7 +50,6 @@ import org.junit.jupiter.api.Timeout;
|
|||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
@ -409,7 +408,7 @@ public class DeleteStreamsGroupOffsetTest {
|
|||
private static StreamsBuilder builder(String inputTopic, String outputTopic) {
|
||||
final StreamsBuilder builder = new StreamsBuilder();
|
||||
builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.flatMapValues(value -> List.of(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.count()
|
||||
.toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
|
||||
|
|
|
@ -55,7 +55,6 @@ import org.junit.jupiter.api.Timeout;
|
|||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
@ -560,7 +559,7 @@ public class DeleteStreamsGroupTest {
|
|||
private static StreamsBuilder builder(String inputTopic, String outputTopic) {
|
||||
final StreamsBuilder builder = new StreamsBuilder();
|
||||
builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.flatMapValues(value -> List.of(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.count()
|
||||
.toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
|
||||
|
|
|
@ -129,10 +129,10 @@ public class DescribeStreamsGroupTest {
|
|||
List.of(APP_ID, "streams-group-command-test-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition", "1", "0"));
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
// --describe --offsets has the same output as --describe
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--offsets", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--offsets", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -145,35 +145,35 @@ public class DescribeStreamsGroupTest {
|
|||
List.of(APP_ID, "streams-group-command-test-KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition", "1", "-", "-", "0", "0"));
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
// --describe --offsets has the same output as --describe
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--offsets", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--offsets", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--offsets", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--offsets", "--group", APP_ID), expectedHeader, expectedRows, List.of());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDescribeStreamsGroupWithStateOption() throws Exception {
|
||||
final List<String> expectedHeader = Arrays.asList("GROUP", "COORDINATOR", "(ID)", "STATE", "#MEMBERS");
|
||||
final Set<List<String>> expectedRows = Set.of(Arrays.asList(APP_ID, "", "", "Stable", "2"));
|
||||
final List<String> expectedHeader = List.of("GROUP", "COORDINATOR", "(ID)", "STATE", "#MEMBERS");
|
||||
final Set<List<String>> expectedRows = Set.of(List.of(APP_ID, "", "", "Stable", "2"));
|
||||
// The coordinator is not deterministic, so we don't care about it.
|
||||
final List<Integer> dontCares = List.of(1, 2);
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--state", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--state", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDescribeStreamsGroupWithStateAndVerboseOptions() throws Exception {
|
||||
final List<String> expectedHeader = Arrays.asList("GROUP", "COORDINATOR", "(ID)", "STATE", "GROUP-EPOCH", "TARGET-ASSIGNMENT-EPOCH", "#MEMBERS");
|
||||
final Set<List<String>> expectedRows = Set.of(Arrays.asList(APP_ID, "", "", "Stable", "3", "3", "2"));
|
||||
final List<String> expectedHeader = List.of("GROUP", "COORDINATOR", "(ID)", "STATE", "GROUP-EPOCH", "TARGET-ASSIGNMENT-EPOCH", "#MEMBERS");
|
||||
final Set<List<String>> expectedRows = Set.of(List.of(APP_ID, "", "", "Stable", "3", "3", "2"));
|
||||
// The coordinator is not deterministic, so we don't care about it.
|
||||
final List<Integer> dontCares = List.of(1, 2);
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--state", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--state", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--state", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--state", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -186,7 +186,7 @@ public class DescribeStreamsGroupTest {
|
|||
final List<Integer> dontCares = List.of(1, 2, 3);
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--members", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--members", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -199,9 +199,9 @@ public class DescribeStreamsGroupTest {
|
|||
final List<Integer> dontCares = List.of(3, 6, 7);
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--group", APP_ID), expectedHeader, expectedRows, dontCares);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -225,13 +225,13 @@ public class DescribeStreamsGroupTest {
|
|||
final List<Integer> dontCares = List.of(3, 6, 7);
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", APP_ID, "--group", APP_ID_2),
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", APP_ID, "--group", APP_ID_2),
|
||||
expectedHeader, expectedRowsMap, dontCares);
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--group", APP_ID, "--group", APP_ID_2),
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--group", APP_ID, "--group", APP_ID_2),
|
||||
expectedHeader, expectedRowsMap, dontCares);
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--all-groups"),
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--all-groups"),
|
||||
expectedHeader, expectedRowsMap, dontCares);
|
||||
|
||||
streams2.close();
|
||||
|
@ -246,14 +246,14 @@ public class DescribeStreamsGroupTest {
|
|||
nonExistingGroup);
|
||||
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", nonExistingGroup), errorMessage);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", nonExistingGroup), errorMessage);
|
||||
validateDescribeOutput(
|
||||
Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--group", nonExistingGroup), errorMessage);
|
||||
List.of("--bootstrap-server", bootstrapServers, "--describe", "--verbose", "--members", "--group", nonExistingGroup), errorMessage);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDescribeStreamsGroupWithShortTimeout() {
|
||||
List<String> args = Arrays.asList("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", APP_ID, "--timeout", "1");
|
||||
List<String> args = List.of("--bootstrap-server", bootstrapServers, "--describe", "--members", "--verbose", "--group", APP_ID, "--timeout", "1");
|
||||
Throwable e = assertThrows(ExecutionException.class, () -> getStreamsGroupService(args.toArray(new String[0])).describeGroups());
|
||||
assertEquals(TimeoutException.class, e.getCause().getClass());
|
||||
}
|
||||
|
@ -261,7 +261,7 @@ public class DescribeStreamsGroupTest {
|
|||
private static Topology topology(String inputTopic, String outputTopic) {
|
||||
final StreamsBuilder builder = new StreamsBuilder();
|
||||
builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.flatMapValues(value -> List.of(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.count()
|
||||
.toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
|
||||
|
@ -309,11 +309,11 @@ public class DescribeStreamsGroupTest {
|
|||
if (lines.length == 1 && lines[0].isEmpty()) lines = new String[]{};
|
||||
|
||||
if (lines.length == 0) return false;
|
||||
List<String> header = Arrays.asList(lines[0].split("\\s+"));
|
||||
List<String> header = List.of(lines[0].split("\\s+"));
|
||||
if (!expectedHeader.equals(header)) return false;
|
||||
|
||||
Set<List<String>> groupDesc = Arrays.stream(Arrays.copyOfRange(lines, 1, lines.length))
|
||||
.map(line -> Arrays.asList(line.split("\\s+")))
|
||||
.map(line -> List.of(line.split("\\s+")))
|
||||
.collect(Collectors.toSet());
|
||||
if (groupDesc.size() != expectedRows.size()) return false;
|
||||
// clear the dontCare fields and then compare two sets
|
||||
|
@ -344,7 +344,7 @@ public class DescribeStreamsGroupTest {
|
|||
if (lines.length == 1 && lines[0].isEmpty()) lines = new String[]{};
|
||||
|
||||
if (lines.length == 0) return false;
|
||||
List<String> header = Arrays.asList(lines[0].split("\\s+"));
|
||||
List<String> header = List.of(lines[0].split("\\s+"));
|
||||
if (!expectedHeader.equals(header)) return false;
|
||||
|
||||
Map<String, Set<List<String>>> groupdescMap = splitOutputByGroup(lines);
|
||||
|
@ -382,7 +382,7 @@ public class DescribeStreamsGroupTest {
|
|||
if (lines[i].replaceAll(" ", "").equals(headerLine) || i == lines.length - 1) {
|
||||
if (i == lines.length - 1) i++;
|
||||
Set<List<String>> groupDesc = Arrays.stream(Arrays.copyOfRange(lines, j, i))
|
||||
.map(line -> Arrays.asList(line.split("\\s+")))
|
||||
.map(line -> List.of(line.split("\\s+")))
|
||||
.collect(Collectors.toSet());
|
||||
groupdescMap.put(groupName, groupDesc);
|
||||
if (i + 1 < lines.length) {
|
||||
|
|
|
@ -42,10 +42,10 @@ import org.junit.jupiter.api.Test;
|
|||
import org.junit.jupiter.api.Timeout;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Properties;
|
||||
|
@ -133,7 +133,7 @@ public class ListStreamsGroupTest {
|
|||
final AtomicReference<Set<GroupListing>> foundListing = new AtomicReference<>();
|
||||
|
||||
TestUtils.waitForCondition(() -> {
|
||||
foundListing.set(new HashSet<>(service.listStreamsGroupsInStates(Collections.emptySet())));
|
||||
foundListing.set(new HashSet<>(service.listStreamsGroupsInStates(Set.of())));
|
||||
return Objects.equals(expectedListing, foundListing.get());
|
||||
}, "Expected --list to show streams groups " + expectedListing + ", but found " + foundListing.get() + ".");
|
||||
}
|
||||
|
@ -153,18 +153,18 @@ public class ListStreamsGroupTest {
|
|||
final AtomicReference<Set<GroupListing>> foundListing = new AtomicReference<>();
|
||||
|
||||
TestUtils.waitForCondition(() -> {
|
||||
foundListing.set(new HashSet<>(service.listStreamsGroupsInStates(Collections.emptySet())));
|
||||
foundListing.set(new HashSet<>(service.listStreamsGroupsInStates(Set.of())));
|
||||
return Objects.equals(expectedListing, foundListing.get());
|
||||
}, "Expected --list to show streams groups " + expectedListing + ", but found " + foundListing.get() + ".");
|
||||
}
|
||||
|
||||
try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--list", "--state", "PreparingRebalance"})) {
|
||||
Set<GroupListing> expectedListing = Collections.emptySet();
|
||||
Set<GroupListing> expectedListing = Set.of();
|
||||
|
||||
final AtomicReference<Set<GroupListing>> foundListing = new AtomicReference<>();
|
||||
|
||||
TestUtils.waitForCondition(() -> {
|
||||
foundListing.set(new HashSet<>(service.listStreamsGroupsInStates(Collections.singleton(GroupState.PREPARING_REBALANCE))));
|
||||
foundListing.set(new HashSet<>(service.listStreamsGroupsInStates(Set.of(GroupState.PREPARING_REBALANCE))));
|
||||
return Objects.equals(expectedListing, foundListing.get());
|
||||
}, "Expected --list to show streams groups " + expectedListing + ", but found " + foundListing.get() + ".");
|
||||
}
|
||||
|
@ -173,35 +173,35 @@ public class ListStreamsGroupTest {
|
|||
@Test
|
||||
public void testListStreamsGroupOutput() throws Exception {
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--list"),
|
||||
Collections.emptyList(),
|
||||
Set.of(Collections.singletonList(APP_ID))
|
||||
List.of("--bootstrap-server", cluster.bootstrapServers(), "--list"),
|
||||
List.of(),
|
||||
Set.of(List.of(APP_ID))
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--list", "--state"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
Set.of(Arrays.asList(APP_ID, "Stable"))
|
||||
List.of("--bootstrap-server", cluster.bootstrapServers(), "--list", "--state"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(List.of(APP_ID, "Stable"))
|
||||
);
|
||||
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--list", "--state", "Stable"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
Set.of(Arrays.asList(APP_ID, "Stable"))
|
||||
List.of("--bootstrap-server", cluster.bootstrapServers(), "--list", "--state", "Stable"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(List.of(APP_ID, "Stable"))
|
||||
);
|
||||
|
||||
// Check case-insensitivity in state filter.
|
||||
validateListOutput(
|
||||
Arrays.asList("--bootstrap-server", cluster.bootstrapServers(), "--list", "--state", "stable"),
|
||||
Arrays.asList("GROUP", "STATE"),
|
||||
Set.of(Arrays.asList(APP_ID, "Stable"))
|
||||
List.of("--bootstrap-server", cluster.bootstrapServers(), "--list", "--state", "stable"),
|
||||
List.of("GROUP", "STATE"),
|
||||
Set.of(List.of(APP_ID, "Stable"))
|
||||
);
|
||||
}
|
||||
|
||||
private static Topology topology() {
|
||||
final StreamsBuilder builder = new StreamsBuilder();
|
||||
builder.stream(INPUT_TOPIC, Consumed.with(Serdes.String(), Serdes.String()))
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.flatMapValues(value -> List.of(value.toLowerCase(Locale.getDefault()).split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.count()
|
||||
.toStream().to(OUTPUT_TOPIC, Produced.with(Serdes.String(), Serdes.Long()));
|
||||
|
@ -212,7 +212,7 @@ public class ListStreamsGroupTest {
|
|||
StreamsGroupCommandOptions opts = StreamsGroupCommandOptions.fromArgs(args);
|
||||
return new StreamsGroupCommand.StreamsGroupService(
|
||||
opts,
|
||||
Collections.singletonMap(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
Map.of(AdminClientConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE))
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -230,12 +230,12 @@ public class ListStreamsGroupTest {
|
|||
if (lines.length == 1 && lines[0].isEmpty()) lines = new String[]{};
|
||||
|
||||
if (!expectedHeader.isEmpty() && lines.length > 0) {
|
||||
List<String> header = Arrays.asList(lines[0].split("\\s+"));
|
||||
List<String> header = List.of(lines[0].split("\\s+"));
|
||||
if (!expectedHeader.equals(header)) return false;
|
||||
}
|
||||
|
||||
Set<List<String>> groups = Arrays.stream(lines, expectedHeader.isEmpty() ? 0 : 1, lines.length)
|
||||
.map(line -> Arrays.asList(line.split("\\s+")))
|
||||
.map(line -> List.of(line.split("\\s+")))
|
||||
.collect(Collectors.toSet());
|
||||
return expectedRows.equals(groups);
|
||||
}, () -> String.format("Expected header=%s and groups=%s, but found:%n%s", expectedHeader, expectedRows, out.get()));
|
||||
|
|
|
@ -70,7 +70,6 @@ import java.util.stream.Collectors;
|
|||
import joptsimple.OptionException;
|
||||
|
||||
import static java.time.LocalDateTime.now;
|
||||
import static java.util.Arrays.asList;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
|
@ -443,13 +442,13 @@ public class ResetStreamsGroupOffsetTest {
|
|||
}
|
||||
// assert that the reset offsets are as expected
|
||||
assertEquals(expectedResetResults, resetOffsetsResultByGroup);
|
||||
assertEquals(expectedResetResults.values().size(), resetOffsetsResultByGroup.values().size());
|
||||
assertEquals(expectedResetResults.size(), resetOffsetsResultByGroup.size());
|
||||
// assert that the committed offsets are as expected
|
||||
AssertCommittedOffsets(appId, topic, expectedCommittedOffset, partitions);
|
||||
}
|
||||
|
||||
private void resetOffsetsAndAssertInternalTopicDeletion(String[] args, String appId, String... specifiedInternalTopics) throws InterruptedException {
|
||||
List<String> specifiedInternalTopicsList = asList(specifiedInternalTopics);
|
||||
List<String> specifiedInternalTopicsList = List.of(specifiedInternalTopics);
|
||||
Set<String> allInternalTopics = getInternalTopics(appId);
|
||||
specifiedInternalTopicsList.forEach(allInternalTopics::remove);
|
||||
|
||||
|
@ -465,10 +464,10 @@ public class ResetStreamsGroupOffsetTest {
|
|||
);
|
||||
// verify that the specified internal topics were deleted
|
||||
Set<String> internalTopicsAfterReset = getInternalTopics(appId);
|
||||
specifiedInternalTopicsList.forEach(topic -> {
|
||||
specifiedInternalTopicsList.forEach(topic ->
|
||||
assertFalse(internalTopicsAfterReset.contains(topic),
|
||||
"Internal topic '" + topic + "' was not deleted as expected after reset");
|
||||
});
|
||||
"Internal topic '" + topic + "' was not deleted as expected after reset")
|
||||
);
|
||||
|
||||
} else {
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -530,7 +529,7 @@ public class ResetStreamsGroupOffsetTest {
|
|||
}
|
||||
// assert that the reset offsets are as expected
|
||||
assertEquals(expectedResetResults, resetOffsetsResultByGroup);
|
||||
assertEquals(expectedResetResults.values().size(), resetOffsetsResultByGroup.values().size());
|
||||
assertEquals(expectedResetResults.size(), resetOffsetsResultByGroup.size());
|
||||
// assert that the committed offsets are as expected
|
||||
AssertCommittedOffsets(appId, topic1, topic2, expectedCommittedOffset);
|
||||
}
|
||||
|
@ -565,7 +564,7 @@ public class ResetStreamsGroupOffsetTest {
|
|||
}
|
||||
// assert that the reset offsets are as expected
|
||||
assertEquals(expectedOffsets, resetOffsetsResult);
|
||||
assertEquals(expectedOffsets.values().size(), resetOffsetsResult.values().size());
|
||||
assertEquals(expectedOffsets.size(), resetOffsetsResult.size());
|
||||
// assert that the committed offsets are as expected
|
||||
assertEquals(expectedCommittedOffsets, committedOffsets(topics, appId));
|
||||
}
|
||||
|
@ -631,8 +630,8 @@ public class ResetStreamsGroupOffsetTest {
|
|||
}
|
||||
|
||||
private String[] addTo(String[] args, String... extra) {
|
||||
List<String> res = new ArrayList<>(asList(args));
|
||||
res.addAll(asList(extra));
|
||||
List<String> res = new ArrayList<>(List.of(args));
|
||||
res.addAll(List.of(extra));
|
||||
return res.toArray(new String[0]);
|
||||
}
|
||||
|
||||
|
|
|
@ -52,7 +52,6 @@ import org.mockito.ArgumentMatchers;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -91,13 +90,13 @@ public class StreamsGroupCommandTest {
|
|||
|
||||
String[] cgcArgs = new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--list"};
|
||||
ListGroupsResult result = mock(ListGroupsResult.class);
|
||||
when(result.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList(
|
||||
when(result.all()).thenReturn(KafkaFuture.completedFuture(List.of(
|
||||
new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)),
|
||||
new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY))
|
||||
)));
|
||||
when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(result);
|
||||
StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs);
|
||||
Set<String> expectedGroups = new HashSet<>(Arrays.asList(firstGroup, secondGroup));
|
||||
Set<String> expectedGroups = Set.of(firstGroup, secondGroup);
|
||||
|
||||
final Set[] foundGroups = new Set[]{Set.of()};
|
||||
TestUtils.waitForCondition(() -> {
|
||||
|
@ -111,9 +110,7 @@ public class StreamsGroupCommandTest {
|
|||
@Test
|
||||
public void testListWithUnrecognizedOption() {
|
||||
String[] cgcArgs = new String[]{"--frivolous-nonsense", "--bootstrap-server", BOOTSTRAP_SERVERS, "--list"};
|
||||
final Exception exception = assertThrows(OptionException.class, () -> {
|
||||
getStreamsGroupService(cgcArgs);
|
||||
});
|
||||
final Exception exception = assertThrows(OptionException.class, () -> getStreamsGroupService(cgcArgs));
|
||||
assertEquals("frivolous-nonsense is not a recognized option", exception.getMessage());
|
||||
}
|
||||
|
||||
|
@ -124,19 +121,19 @@ public class StreamsGroupCommandTest {
|
|||
|
||||
String[] cgcArgs = new String[]{"--bootstrap-server", BOOTSTRAP_SERVERS, "--list", "--state"};
|
||||
ListGroupsResult resultWithAllStates = mock(ListGroupsResult.class);
|
||||
when(resultWithAllStates.all()).thenReturn(KafkaFuture.completedFuture(Arrays.asList(
|
||||
when(resultWithAllStates.all()).thenReturn(KafkaFuture.completedFuture(List.of(
|
||||
new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)),
|
||||
new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY))
|
||||
)));
|
||||
when(ADMIN_CLIENT.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates);
|
||||
StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs);
|
||||
Set<GroupListing> expectedListing = new HashSet<>(Arrays.asList(
|
||||
Set<GroupListing> expectedListing = Set.of(
|
||||
new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)),
|
||||
new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY))));
|
||||
new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)));
|
||||
|
||||
final Set[] foundListing = new Set[]{Set.of()};
|
||||
TestUtils.waitForCondition(() -> {
|
||||
foundListing[0] = new HashSet<>(service.listStreamsGroupsInStates(new HashSet<>(Arrays.asList(GroupState.values()))));
|
||||
foundListing[0] = new HashSet<>(service.listStreamsGroupsInStates(Set.of(GroupState.values())));
|
||||
return Objects.equals(expectedListing, foundListing[0]);
|
||||
}, "Expected to show groups " + expectedListing + ", but found " + foundListing[0]);
|
||||
|
||||
|
@ -257,34 +254,34 @@ public class StreamsGroupCommandTest {
|
|||
@Test
|
||||
public void testGroupStatesFromString() {
|
||||
Set<GroupState> result = StreamsGroupCommand.groupStatesFromString("empty");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result);
|
||||
assertEquals(Set.of(GroupState.EMPTY), result);
|
||||
result = StreamsGroupCommand.groupStatesFromString("EMPTY");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result);
|
||||
assertEquals(Set.of(GroupState.EMPTY), result);
|
||||
|
||||
result = StreamsGroupCommand.groupStatesFromString("notready");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result);
|
||||
assertEquals(Set.of(GroupState.NOT_READY), result);
|
||||
result = StreamsGroupCommand.groupStatesFromString("notReady");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result);
|
||||
assertEquals(Set.of(GroupState.NOT_READY), result);
|
||||
|
||||
result = StreamsGroupCommand.groupStatesFromString("assigning");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result);
|
||||
assertEquals(Set.of(GroupState.ASSIGNING), result);
|
||||
result = StreamsGroupCommand.groupStatesFromString("ASSIGNING");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result);
|
||||
assertEquals(Set.of(GroupState.ASSIGNING), result);
|
||||
|
||||
result = StreamsGroupCommand.groupStatesFromString("RECONCILING");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result);
|
||||
assertEquals(Set.of(GroupState.RECONCILING), result);
|
||||
result = StreamsGroupCommand.groupStatesFromString("reconCILING");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result);
|
||||
assertEquals(Set.of(GroupState.RECONCILING), result);
|
||||
|
||||
result = StreamsGroupCommand.groupStatesFromString("STABLE");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result);
|
||||
assertEquals(Set.of(GroupState.STABLE), result);
|
||||
result = StreamsGroupCommand.groupStatesFromString("stable");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result);
|
||||
assertEquals(Set.of(GroupState.STABLE), result);
|
||||
|
||||
result = StreamsGroupCommand.groupStatesFromString("DEAD");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result);
|
||||
assertEquals(Set.of(GroupState.DEAD), result);
|
||||
result = StreamsGroupCommand.groupStatesFromString("dead");
|
||||
assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result);
|
||||
assertEquals(Set.of(GroupState.DEAD), result);
|
||||
|
||||
assertThrow("preparingRebalance");
|
||||
assertThrow("completingRebalance");
|
||||
|
@ -297,7 +294,7 @@ public class StreamsGroupCommandTest {
|
|||
public void testAdminRequestsForResetOffsets() {
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
String groupId = "foo-group";
|
||||
List<String> args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--input-topic", "topic1", "--to-latest"));
|
||||
List<String> args = List.of("--bootstrap-server", "localhost:9092", "--group", groupId, "--reset-offsets", "--input-topic", "topic1", "--to-latest");
|
||||
List<String> topics = List.of("topic1");
|
||||
|
||||
when(adminClient.describeStreamsGroups(List.of(groupId)))
|
||||
|
@ -317,8 +314,8 @@ public class StreamsGroupCommandTest {
|
|||
StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(args.toArray(new String[0]), adminClient);
|
||||
Map<String, Map<TopicPartition, OffsetAndMetadata>> resetResult = service.resetOffsets();
|
||||
|
||||
assertEquals(Collections.singleton(groupId), resetResult.keySet());
|
||||
assertEquals(new HashSet<>(List.of(new TopicPartition(topics.get(0), 0))),
|
||||
assertEquals(Set.of(groupId), resetResult.keySet());
|
||||
assertEquals(Set.of(new TopicPartition(topics.get(0), 0)),
|
||||
resetResult.get(groupId).keySet());
|
||||
|
||||
verify(adminClient, times(1)).describeStreamsGroups(List.of(groupId));
|
||||
|
@ -332,7 +329,7 @@ public class StreamsGroupCommandTest {
|
|||
@Test
|
||||
public void testRetrieveInternalTopics() {
|
||||
String groupId = "foo-group";
|
||||
List<String> args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete"));
|
||||
List<String> args = List.of("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete");
|
||||
List<String> sourceTopics = List.of("source-topic1", "source-topic2");
|
||||
List<String> repartitionSinkTopics = List.of("rep-sink-topic1", "rep-sink-topic2");
|
||||
Map<String, StreamsGroupSubtopologyDescription.TopicInfo> stateChangelogTopics = Map.of(
|
||||
|
@ -365,7 +362,7 @@ public class StreamsGroupCommandTest {
|
|||
|
||||
assertNotNull(internalTopics.get(groupId));
|
||||
assertEquals(4, internalTopics.get(groupId).size());
|
||||
assertEquals(new HashSet<>(List.of(groupId + "-1-changelog", groupId + "-2-changelog", groupId + "-1-repartition", groupId + "-2-repartition")),
|
||||
assertEquals(Set.of(groupId + "-1-changelog", groupId + "-2-changelog", groupId + "-1-repartition", groupId + "-2-repartition"),
|
||||
new HashSet<>(internalTopics.get(groupId)));
|
||||
assertFalse(internalTopics.get(groupId).stream().anyMatch(List.of("some-pre-fix-changelog", groupId + "-some-thing")::contains));
|
||||
assertFalse(internalTopics.get(groupId).stream().anyMatch(sourceTopics::contains));
|
||||
|
@ -378,7 +375,7 @@ public class StreamsGroupCommandTest {
|
|||
public void testDeleteStreamsGroup() {
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
String groupId = "foo-group";
|
||||
List<String> args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete", "--delete-all-internal-topics"));
|
||||
List<String> args = List.of("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete", "--delete-all-internal-topics");
|
||||
|
||||
DeleteStreamsGroupsResult deleteStreamsGroupsResult = mock(DeleteStreamsGroupsResult.class);
|
||||
when(adminClient.deleteStreamsGroups(eq(List.of(groupId)), any(DeleteStreamsGroupsOptions.class))).thenReturn(deleteStreamsGroupsResult);
|
||||
|
@ -409,7 +406,7 @@ public class StreamsGroupCommandTest {
|
|||
public void testDeleteNonStreamsGroup() {
|
||||
Admin adminClient = mock(KafkaAdminClient.class);
|
||||
String groupId = "foo-group";
|
||||
List<String> args = new ArrayList<>(Arrays.asList("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete"));
|
||||
List<String> args = List.of("--bootstrap-server", "localhost:9092", "--group", groupId, "--delete");
|
||||
|
||||
ListGroupsResult listGroupsResult = mock(ListGroupsResult.class);
|
||||
when(adminClient.listGroups(any())).thenReturn(listGroupsResult);
|
||||
|
@ -419,8 +416,8 @@ public class StreamsGroupCommandTest {
|
|||
Map<String, Throwable> result = service.deleteGroups();
|
||||
|
||||
assertNotNull(result.get(groupId));
|
||||
assertEquals(result.get(groupId).getMessage(),
|
||||
"Group '" + groupId + "' does not exist or is not a streams group.");
|
||||
assertEquals("Group '" + groupId + "' does not exist or is not a streams group.",
|
||||
result.get(groupId).getMessage());
|
||||
assertInstanceOf(IllegalArgumentException.class, result.get(groupId));
|
||||
verify(adminClient, times(1)).listGroups(any(ListGroupsOptions.class));
|
||||
// we do not expect any further API to be called
|
||||
|
@ -450,7 +447,7 @@ public class StreamsGroupCommandTest {
|
|||
}
|
||||
|
||||
private static void assertThrow(final String wrongState) {
|
||||
final Set<String> validStates = new HashSet<>(Arrays.asList("Assigning", "Dead", "Empty", "Reconciling", "Stable", "NotReady"));
|
||||
final Set<String> validStates = Set.of("Assigning", "Dead", "Empty", "Reconciling", "Stable", "NotReady");
|
||||
|
||||
final Exception exception = assertThrows(IllegalArgumentException.class, () -> StreamsGroupCommand.groupStatesFromString(wrongState));
|
||||
|
||||
|
@ -473,25 +470,21 @@ public class StreamsGroupCommandTest {
|
|||
0,
|
||||
0,
|
||||
0,
|
||||
Collections.singletonList(new StreamsGroupSubtopologyDescription("subtopologyId", Collections.emptyList(), Collections.emptyList(), Map.of(), Map.of())),
|
||||
List.of(new StreamsGroupSubtopologyDescription("subtopologyId", List.of(), List.of(), Map.of(), Map.of())),
|
||||
List.of(memberDescription),
|
||||
groupState,
|
||||
new Node(1, "localhost", 9092),
|
||||
Set.of());
|
||||
KafkaFutureImpl<StreamsGroupDescription> future = new KafkaFutureImpl<>();
|
||||
future.complete(description);
|
||||
return new DescribeStreamsGroupsResult(Collections.singletonMap(groupId, future));
|
||||
return new DescribeStreamsGroupsResult(Map.of(groupId, future));
|
||||
}
|
||||
|
||||
private DescribeTopicsResult describeTopicsResult(Collection<String> topics, int numOfPartitions) {
|
||||
Map<String, TopicDescription> topicDescriptions = new HashMap<>();
|
||||
|
||||
topics.forEach(topic -> {
|
||||
List<TopicPartitionInfo> partitions = IntStream.range(0, numOfPartitions)
|
||||
.mapToObj(i -> new TopicPartitionInfo(i, null, Collections.emptyList(), Collections.emptyList()))
|
||||
.collect(Collectors.toList());
|
||||
topicDescriptions.put(topic, new TopicDescription(topic, false, partitions));
|
||||
});
|
||||
var topicDescriptions = topics.stream().collect(Collectors.toMap(Function.identity(),
|
||||
topic -> new TopicDescription(topic, false, IntStream.range(0, numOfPartitions)
|
||||
.mapToObj(i -> new TopicPartitionInfo(i, null, List.of(), List.of()))
|
||||
.toList())));
|
||||
return AdminClientTestUtils.describeTopicsResult(topicDescriptions);
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ package org.apache.kafka.tools.api;
|
|||
import org.junit.jupiter.api.Assertions;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
public class RecordReaderTest {
|
||||
|
||||
|
@ -27,7 +27,7 @@ public class RecordReaderTest {
|
|||
void testDefaultCloseAndConfigure() {
|
||||
RecordReader reader = inputStream -> null;
|
||||
// `configure` and `close` should have default empty body
|
||||
Assertions.assertDoesNotThrow(() -> reader.configure(Collections.emptyMap()));
|
||||
Assertions.assertDoesNotThrow(() -> reader.configure(Map.of()));
|
||||
Assertions.assertDoesNotThrow(reader::close);
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue