From 2e968560e069369b8bd608e038f00f41e275909b Mon Sep 17 00:00:00 2001
From: Jhen-Yung Hsu
Date: Wed, 11 Jun 2025 18:36:14 +0800
Subject: [PATCH] MINOR: Cleanup: simplify set initialization with Set.of
 (#19925)

Simplify Set initialization and reduce the overhead of creating extra
collections. The replaced patterns mostly include:

- new HashSet<>(List.of(...))
- new HashSet<>(Arrays.asList(...)) / new HashSet<>(asList(...))
- new HashSet<>(Collections.singletonList()) / new HashSet<>(singletonList())
- new HashSet<>(Collections.emptyList())
- new HashSet<>(Set.of())

This change takes the following into account; call sites are not converted
to Set.of in these scenarios (illustrated in the sketch after the file
list):

- They require mutability (Set.of throws UnsupportedOperationException).
- They allow duplicate elements (Set.of throws IllegalArgumentException).
- They allow null elements (Set.of throws NullPointerException).
- They depend on ordering. Set.of does not guarantee iteration order, so
  converting could make tests flaky or break public interfaces.

Reviewers: Ken Huang, PoAn Yang, Chia-Ping Tsai
---
 .../kafka/common/config/LogLevelConfig.java   |   6 +-
 .../apache/kafka/clients/MetadataTest.java    |  12 +-
 .../clients/admin/KafkaAdminClientTest.java   |  28 ++--
 .../admin/ListConsumerGroupsOptionsTest.java  |   4 +-
 .../admin/internals/AdminApiDriverTest.java   |   3 +-
 .../internals/CoordinatorStrategyTest.java    |  20 +--
 ...DeleteConsumerGroupOffsetsHandlerTest.java |   4 +-
 .../DescribeConsumerGroupsHandlerTest.java    |   5 +-
 .../internals/ConsumerCoordinatorTest.java    |  10 +-
 .../consumer/internals/FetchBufferTest.java   |   3 +-
 .../internals/FetchCollectorTest.java         |   3 +-
 .../internals/FetchRequestManagerTest.java    |   8 +-
 .../consumer/internals/FetcherTest.java       |   8 +-
 .../internals/OffsetsRequestManagerTest.java  |   3 +-
 .../internals/ShareFetchBufferTest.java       |   4 +-
 .../internals/StreamsRebalanceDataTest.java   |  10 +-
 .../internals/SubscriptionStateTest.java      |  10 +-
 .../metrics/AsyncConsumerMetricsTest.java     |  11 +-
 .../internals/ProducerMetadataTest.java       |  10 +-
 .../internals/RecordAccumulatorTest.java      |  15 +-
 .../producer/internals/SenderTest.java        |  16 +--
 .../kafka/common/config/ConfigDefTest.java    |   6 +-
 .../provider/EnvVarConfigProviderTest.java    |   4 +-
 ...arerUnsecuredLoginCallbackHandlerTest.java |  11 +-
 .../auth/extension/JaasBasicAuthFilter.java   |   6 +-
 .../kafka/connect/json/JsonConverterTest.java |   8 +-
 .../connect/mirror/MirrorClientTest.java      |   8 +-
 .../mirror/MirrorCheckpointMetrics.java       |   4 +-
 .../connect/mirror/MirrorSourceMetrics.java   |   4 +-
 .../connect/mirror/MirrorSourceTaskTest.java  |   6 +-
 .../MirrorConnectorsIntegrationBaseTest.java  |   4 +-
 .../connect/runtime/isolation/Plugins.java    |   3 +-
 .../connect/runtime/WorkerSinkTaskTest.java   |  18 +--
 .../runtime/WorkerSinkTaskThreadedTest.java   |   7 +-
 .../distributed/DistributedHerderTest.java    |   4 +-
 .../IncrementalCooperativeAssignorTest.java   |   4 +-
 .../resources/ConnectorsResourceTest.java     |   8 +-
 .../KafkaStatusBackingStoreFormatTest.java    |   4 +-
 .../storage/KafkaStatusBackingStoreTest.java  |   8 +-
 .../kafka/connect/util/KafkaBasedLogTest.java |  11 +-
 .../kafka/connect/util/TopicAdminTest.java    |   3 +-
 .../kafka/connect/util/TopicCreationTest.java |  19 ++-
 .../util/clusters/ConnectAssertions.java      |   5 +-
 .../util/clusters/EmbeddedKafkaCluster.java   |   3 +-
 .../CoordinatorRuntimeMetricsImplTest.java    |   7 +-
 .../java/kafka/security/minikdc/MiniKdc.java  |   4 +-
 .../ReconfigurableQuorumIntegrationTest.java  |   8 +-
 .../share/SharePartitionManagerTest.java      |   4 +-
 .../group/classic/ClassicGroupState.java      |   4 +-
 .../group/assignor/RangeSetTest.java          |   3 +-
 .../group/classic/ClassicGroupTest.java       |   6 +-
 .../metrics/GroupCoordinatorMetricsTest.java  |  10 +-
 .../modern/consumer/ConsumerGroupTest.java    |  14 +-
 .../group/modern/share/ShareGroupTest.java    |   9 +-
 .../streams/assignor/MockAssignorTest.java    |   4 +-
 .../topics/ConfiguredSubtopologyTest.java     |   5 +-
 .../controller/AclControlManagerTest.java     |   4 +-
 .../kafka/controller/BrokerToElrsTest.java    |   3 +-
 .../kafka/controller/BrokersToIsrsTest.java   |   3 +-
 .../controller/FeatureControlManagerTest.java |   7 +-
 .../controller/QuorumControllerTest.java      |   2 +-
 .../ReplicationControlManagerTest.java        |   8 +-
 .../ControllerMetadataMetricsTest.java        |   6 +-
 .../metrics/QuorumControllerMetricsTest.java  |   6 +-
 .../apache/kafka/image/TopicsImageTest.java   |  22 +--
 .../metrics/MetadataLoaderMetricsTest.java    |   6 +-
 .../ControllerRegistrationsPublisherTest.java |   5 +-
 .../metrics/SnapshotEmitterMetricsTest.java   |   6 +-
 .../apache/kafka/metadata/ReplicasTest.java   |   5 +-
 .../MetaPropertiesEnsembleTest.java           |  17 ++-
 .../raft/KafkaRaftClientReconfigTest.java     |   4 +-
 .../org/apache/kafka/raft/VoterSetTest.java   |   4 +-
 .../network/EndpointReadyFuturesTest.java     |  12 +-
 .../kafka/security/authorizer/AclEntry.java   |  11 +-
 .../share/ShareCoordinatorServiceTest.java    |  48 +++----
 .../metrics/ShareCoordinatorMetricsTest.java  |   7 +-
 .../log/ProducerStateManagerTest.java         |   7 +-
 ...rJoinCustomPartitionerIntegrationTest.java |   2 +-
 .../QueryableStateIntegrationTest.java        |   4 +-
 .../streams/state/QueryableStoreTypes.java    |  10 +-
 .../apache/kafka/streams/TopologyTest.java    |   5 +-
 .../internals/KStreamKStreamJoinTest.java     |  12 +-
 .../internals/KStreamKStreamLeftJoinTest.java |  10 +-
 .../KStreamKStreamOuterJoinTest.java          |  10 +-
 .../internals/KStreamKTableJoinTest.java      |   4 +-
 .../internals/KStreamKTableLeftJoinTest.java  |   4 +-
 .../internals/KTableKTableInnerJoinTest.java  |   4 +-
 .../internals/KTableKTableLeftJoinTest.java   |   4 +-
 .../internals/KTableKTableOuterJoinTest.java  |   4 +-
 .../internals/InternalTopicManagerTest.java   |   8 +-
 .../internals/ProcessorNodeTest.java          |   2 +-
 .../processor/internals/StreamTaskTest.java   |  10 +-
 .../StreamsPartitionAssignorTest.java         |   4 +-
 .../internals/TaskExecutionMetadataTest.java  |   3 +-
 .../LegacyStickyTaskAssignorTest.java         |   2 +-
 .../assignment/SubscriptionInfoTest.java      |  10 +-
 .../AbstractRocksDBWindowStoreTest.java       | 108 +++++++-------
 .../AbstractSessionBytesStoreTest.java        |  32 ++---
 .../AbstractWindowBytesStoreTest.java         | 136 +++++++++---------
 .../internals/InMemorySessionStoreTest.java   |   5 +-
 .../kafka/tools/DelegationTokenCommand.java   |  11 +-
 .../org/apache/kafka/tools/TopicCommand.java  |   8 +-
 .../apache/kafka/tools/ConfigCommandTest.java |  12 +-
 .../kafka/tools/LogDirsCommandTest.java       |   2 +-
 .../tools/MetadataQuorumCommandUnitTest.java  |  10 +-
 .../kafka/tools/StreamsResetterTest.java      |   2 +-
 .../apache/kafka/tools/TopicCommandTest.java  |   3 +-
 .../group/ConsumerGroupServiceTest.java       |   4 +-
 .../group/DescribeConsumerGroupTest.java      |   3 +-
 .../consumer/group/ShareGroupCommandTest.java |   8 +-
 .../ReassignPartitionsCommandTest.java        |   2 +-
 .../reassign/ReassignPartitionsUnitTest.java  |  38 ++---
 .../tools/streams/ListStreamsGroupTest.java   |   2 +-
 .../streams/StreamsGroupCommandTest.java      |  36 ++---
 .../trogdor/common/StringExpanderTest.java    |  23 ++-
 115 files changed, 544 insertions(+), 635 deletions(-)
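Note (editor's illustration, not part of the patch): the four exclusion scenarios above come directly from the java.util.Set.of contract. A minimal, self-contained sketch:

    import java.util.HashSet;
    import java.util.Set;

    public class SetOfContractSketch {
        public static void main(String[] args) {
            Set<String> levels = Set.of("INFO", "DEBUG");

            // 1. Mutability: Set.of instances reject modification.
            try {
                levels.add("TRACE");
            } catch (UnsupportedOperationException expected) { }

            // 2. Duplicates: rejected at construction time, whereas
            //    new HashSet<>(Arrays.asList("a", "a")) silently dedupes.
            try {
                Set.of("a", "a");
            } catch (IllegalArgumentException expected) { }

            // 3. Nulls: rejected, whereas HashSet permits one null element.
            try {
                Set.of("a", null);
            } catch (NullPointerException expected) { }

            // 4. Ordering: iteration order is unspecified and can differ
            //    between JVM runs, so order-sensitive call sites keep HashSet.
            Set<String> mutable = new HashSet<>(levels); // explicit copy when mutability is needed
            mutable.add("TRACE");
        }
    }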
diff --git a/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java b/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
index fe7e2eb6669..410082d908b 100644
--- a/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
+++ b/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
@@ -17,8 +17,6 @@
 package org.apache.kafka.common.config;
 
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Set;
 
 /**
@@ -64,8 +62,8 @@ public class LogLevelConfig {
      */
     public static final String TRACE_LOG_LEVEL = "TRACE";
 
-    public static final Set<String> VALID_LOG_LEVELS = new HashSet<>(Arrays.asList(
+    public static final Set<String> VALID_LOG_LEVELS = Set.of(
         FATAL_LOG_LEVEL, ERROR_LOG_LEVEL, WARN_LOG_LEVEL,
         INFO_LOG_LEVEL, DEBUG_LOG_LEVEL, TRACE_LOG_LEVEL
-    ));
+    );
 }
diff --git a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
index f15c4674896..13c378d3983 100644
--- a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
@@ -957,9 +957,9 @@ public class MetadataTest {
         Cluster cluster = metadata.fetch();
         assertEquals(cluster.clusterResource().clusterId(), oldClusterId);
         assertEquals(cluster.nodes().size(), oldNodes);
-        assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("oldInvalidTopic", "keepInvalidTopic")));
-        assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("oldUnauthorizedTopic", "keepUnauthorizedTopic")));
-        assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("oldValidTopic", "keepValidTopic")));
+        assertEquals(cluster.invalidTopics(), Set.of("oldInvalidTopic", "keepInvalidTopic"));
+        assertEquals(cluster.unauthorizedTopics(), Set.of("oldUnauthorizedTopic", "keepUnauthorizedTopic"));
+        assertEquals(cluster.topics(), Set.of("oldValidTopic", "keepValidTopic"));
         assertEquals(cluster.partitionsForTopic("oldValidTopic").size(), 2);
         assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 3);
         assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
@@ -992,9 +992,9 @@ public class MetadataTest {
         cluster = metadata.fetch();
         assertEquals(cluster.clusterResource().clusterId(), newClusterId);
         assertEquals(cluster.nodes().size(), newNodes);
-        assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("keepInvalidTopic", "newInvalidTopic")));
-        assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("keepUnauthorizedTopic", "newUnauthorizedTopic")));
-        assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic")));
+        assertEquals(cluster.invalidTopics(), Set.of("keepInvalidTopic", "newInvalidTopic"));
+        assertEquals(cluster.unauthorizedTopics(), Set.of("keepUnauthorizedTopic", "newUnauthorizedTopic"));
+        assertEquals(cluster.topics(), Set.of("keepValidTopic", "newValidTopic"));
         assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 2);
         assertEquals(cluster.partitionsForTopic("newValidTopic").size(), 4);
         assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java
index eea43f3fb76..1d516cf6648 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java
@@ -390,10 +390,10 @@ public class KafkaAdminClientTest {
             assertNull(cluster.controller());
         }
         assertEquals("Ek8tjqq1QBWfnaoyHFZqDg", cluster.clusterResource().clusterId());
-        assertEquals(new HashSet<>(asList(
+        assertEquals(Set.of(
             new Node(0, "controller0.com", 9092),
             new Node(1, "controller1.com", 9092),
-            new Node(2, "controller2.com", 9092))), new HashSet<>(cluster.nodes()));
+            new Node(2, "controller2.com", 9092)), new HashSet<>(cluster.nodes()));
     }
 
     @Test
@@ -1592,7 +1592,7 @@
             Map<String, TopicDescription> topicDescriptions = result.allTopicNames().get();
             TopicDescription topicDescription = topicDescriptions.get(topicName0);
-            assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)),
+            assertEquals(Set.of(AclOperation.DESCRIBE, AclOperation.ALTER),
                 topicDescription.authorizedOperations());
         }
     }
@@ -2084,7 +2084,7 @@
                 electionResults, ApiKeys.ELECT_LEADERS.latestVersion()));
             ElectLeadersResult results = env.adminClient().electLeaders(
                 electionType,
-                new HashSet<>(asList(topic1, topic2)));
+                Set.of(topic1, topic2));
             assertEquals(ClusterAuthorizationException.class, results.partitions().get().get(topic2).get().getClass());
 
             // Test a call where there are no errors. By mutating the internal of election results
@@ -2096,14 +2096,14 @@
             env.kafkaClient().prepareResponse(new ElectLeadersResponse(0, Errors.NONE.code(), electionResults,
                 ApiKeys.ELECT_LEADERS.latestVersion()));
 
-            results = env.adminClient().electLeaders(electionType, new HashSet<>(asList(topic1, topic2)));
+            results = env.adminClient().electLeaders(electionType, Set.of(topic1, topic2));
             assertFalse(results.partitions().get().get(topic1).isPresent());
             assertFalse(results.partitions().get().get(topic2).isPresent());
 
             // Now try a timeout
             results = env.adminClient().electLeaders(
                 electionType,
-                new HashSet<>(asList(topic1, topic2)),
+                Set.of(topic1, topic2),
                 new ElectLeadersOptions().timeoutMs(100));
             TestUtils.assertFutureThrows(TimeoutException.class, results.partitions());
         }
@@ -2127,7 +2127,7 @@
             Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
                 broker0Resource,
                 broker1Resource)).values();
-            assertEquals(new HashSet<>(asList(broker0Resource, broker1Resource)), result.keySet());
+            assertEquals(Set.of(broker0Resource, broker1Resource), result.keySet());
             result.get(broker0Resource).get();
             result.get(broker1Resource).get();
         }
@@ -2149,7 +2149,7 @@
             Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
                 brokerResource,
                 brokerLoggerResource)).values();
-            assertEquals(new HashSet<>(asList(brokerResource, brokerLoggerResource)), result.keySet());
+            assertEquals(Set.of(brokerResource, brokerLoggerResource), result.keySet());
             result.get(brokerResource).get();
             result.get(brokerLoggerResource).get();
         }
@@ -2168,7 +2168,7 @@
             Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
                 topic,
                 topic2)).values();
-            assertEquals(new HashSet<>(asList(topic, topic2)), result.keySet());
+            assertEquals(Set.of(topic, topic2), result.keySet());
             result.get(topic);
             TestUtils.assertFutureThrows(ApiException.class, result.get(topic2));
         }
@@ -2189,7 +2189,7 @@
                     .setConfigs(emptyList())))));
             Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(singletonList(
                 topic)).values();
-            assertEquals(new HashSet<>(singletonList(topic)), result.keySet());
+            assertEquals(Set.of(topic), result.keySet());
             assertNotNull(result.get(topic).get());
             assertNull(result.get(unrequested));
         }
@@ -2212,7 +2212,7 @@
             Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
                 resource,
                 resource1)).values();
-            assertEquals(new HashSet<>(asList(resource, resource1)), result.keySet());
+            assertEquals(Set.of(resource, resource1), result.keySet());
             assertNotNull(result.get(resource).get());
             assertNotNull(result.get(resource1).get());
         }
@@ -2239,7 +2239,7 @@
             Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(
                 resource1,
                 resource2)).values();
-            assertEquals(new HashSet<>(asList(resource1, resource2)), result.keySet());
+            assertEquals(Set.of(resource1, resource2), result.keySet());
             assertNotNull(result.get(resource1).get());
             assertNotNull(result.get(resource2).get());
         }
@@ -2905,7 +2905,7 @@
             assertEquals(env.cluster().clusterResource().clusterId(), result2.clusterId().get());
             assertEquals(new HashSet<>(env.cluster().nodes()), new HashSet<>(result2.nodes().get()));
             assertEquals(3, result2.controller().get().id());
-            assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)),
+            assertEquals(Set.of(AclOperation.DESCRIBE, AclOperation.ALTER),
                 result2.authorizedOperations().get());
         }
     }
@@ -7922,7 +7922,7 @@
                 .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message());
             env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(unknownTpData));
 
-            ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(new HashSet<>(asList(tp1, tp2)));
+            ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(Set.of(tp1, tp2));
             TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, unknownTpResult.reassignments());
 
             // 3. Success
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java
index 75d6c1c88c5..f20d6e56c95 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java
@@ -20,8 +20,6 @@ import org.apache.kafka.common.ConsumerGroupState;
 
 import org.junit.jupiter.api.Test;
 
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Set;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -29,7 +27,7 @@ public class ListConsumerGroupsOptionsTest {
     @Test
     public void testState() {
-        Set<ConsumerGroupState> consumerGroupStates = new HashSet<>(Arrays.asList(ConsumerGroupState.values()));
+        Set<ConsumerGroupState> consumerGroupStates = Set.of(ConsumerGroupState.values());
         ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(consumerGroupStates);
         assertEquals(consumerGroupStates, options.states());
     }
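Note (editor's illustration, not part of the patch): the ListConsumerGroupsOptionsTest change above is safe because an enum's values() array never contains nulls or duplicates, so the Set.of(E...) varargs overload cannot throw there. The GroupState enum below is a hypothetical stand-in, and EnumSet.allOf is shown only as an alternative for when declaration-order iteration matters:

    import java.util.EnumSet;
    import java.util.Set;

    public class EnumValuesSketch {
        enum GroupState { STABLE, DEAD, EMPTY } // hypothetical stand-in enum

        public static void main(String[] args) {
            // Safe: values() yields distinct, non-null constants.
            Set<GroupState> viaSetOf = Set.of(GroupState.values());
            // Mutable alternative that iterates in declaration order.
            Set<GroupState> viaEnumSet = EnumSet.allOf(GroupState.class);
            System.out.println(viaSetOf.equals(viaEnumSet)); // true: set equality ignores order
        }
    }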
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java
index 7c87f21c643..c4ffc657914 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java
@@ -40,7 +40,6 @@ import org.junit.jupiter.api.Test;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -397,7 +396,7 @@ class AdminApiDriverTest {
     public void testRetryLookupAndDisableBatchAfterNoBatchedFindCoordinatorsException() {
         MockTime time = new MockTime();
         LogContext lc = new LogContext();
-        Set<String> groupIds = new HashSet<>(Arrays.asList("g1", "g2"));
+        Set<String> groupIds = Set.of("g1", "g2");
         DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(lc);
         AdminApiFuture future = AdminApiFuture.forKeys(
             groupIds.stream().map(CoordinatorKey::byGroupId).collect(Collectors.toSet()));
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java
index 8cd9545107f..cbbbe93e2d4 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java
@@ -55,9 +55,9 @@ public class CoordinatorStrategyTest {
     @Test
     public void testBuildLookupRequest() {
         CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext());
-        FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList(
+        FindCoordinatorRequest.Builder request = strategy.buildRequest(Set.of(
             CoordinatorKey.byGroupId("foo"),
-            CoordinatorKey.byGroupId("bar"))));
+            CoordinatorKey.byGroupId("bar")));
         assertEquals("", request.data().key());
         assertEquals(2, request.data().coordinatorKeys().size());
         assertEquals(CoordinatorType.GROUP, CoordinatorType.forId(request.data().keyType()));
@@ -67,8 +67,8 @@
     public void testBuildLookupRequestNonRepresentable() {
         CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext());
         FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList(
-            CoordinatorKey.byGroupId("foo"),
-            null)));
+                CoordinatorKey.byGroupId("foo"),
+                null)));
         assertEquals("", request.data().key());
         assertEquals(1, request.data().coordinatorKeys().size());
     }
@@ -90,7 +90,7 @@
         strategy.disableBatch();
 
         assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest(
-            new HashSet<>(Collections.singletonList(CoordinatorKey.byTransactionalId("txnid")))));
+            Set.of(CoordinatorKey.byTransactionalId("txnid"))));
     }
 
     @Test
@@ -105,9 +105,9 @@
         CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext());
 
         assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest(
-            new HashSet<>(Arrays.asList(
-                CoordinatorKey.byGroupId("group"),
-                CoordinatorKey.byTransactionalId("txnid")))));
+            Set.of(
+                CoordinatorKey.byGroupId("group"),
+                CoordinatorKey.byTransactionalId("txnid"))));
     }
 
     @Test
@@ -161,7 +161,7 @@
             .setPort(9092)
             .setNodeId(2)));
 
-        AdminApiLookupStrategy.LookupResult result = runLookup(new HashSet<>(Arrays.asList(group1, group2)), responseData);
+        AdminApiLookupStrategy.LookupResult result = runLookup(Set.of(group1, group2), responseData);
         Map<CoordinatorKey, Integer> expectedResult = new HashMap<>();
         expectedResult.put(group1, 1);
         expectedResult.put(group2, 2);
@@ -204,7 +204,7 @@
             .setHost("localhost")
             .setPort(9092)
             .setNodeId(2)));
-        AdminApiLookupStrategy.LookupResult result = runLookup(new HashSet<>(Arrays.asList(group1, group2)), responseData);
+        AdminApiLookupStrategy.LookupResult result = runLookup(Set.of(group1, group2), responseData);
 
         assertEquals(emptyMap(), result.failedKeys);
         assertEquals(singletonMap(group2, 2), result.mappedKeys);
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java
index e975b2acbae..5d14529915a 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java
@@ -34,10 +34,8 @@ import org.apache.kafka.common.utils.LogContext;
 
 import org.junit.jupiter.api.Test;
 
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
@@ -55,7 +53,7 @@ public class DeleteConsumerGroupOffsetsHandlerTest {
     private final TopicPartition t0p0 = new TopicPartition("t0", 0);
     private final TopicPartition t0p1 = new TopicPartition("t0", 1);
     private final TopicPartition t1p0 = new TopicPartition("t1", 0);
-    private final Set<TopicPartition> tps = new HashSet<>(Arrays.asList(t0p0, t0p1, t1p0));
+    private final Set<TopicPartition> tps = Set.of(t0p0, t0p1, t1p0);
 
     @Test
     public void testBuildRequest() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java
index 444795b3680..eb3e99dc621 100644
--- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java
@@ -53,7 +53,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -82,10 +81,10 @@
         CoordinatorKey.byGroupId(groupId2)
     ));
     private final Node coordinator = new Node(1, "host", 1234);
-    private final Set<TopicPartition> tps = new HashSet<>(Arrays.asList(
+    private final Set<TopicPartition> tps = Set.of(
         new TopicPartition("foo", 0),
         new TopicPartition("bar", 1)
-    ));
+    );
 
     @ParameterizedTest
     @ValueSource(booleans = {true, false})
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
index 683a25a3e1c..c460a9f7608 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java
@@ -333,7 +333,7 @@ public abstract class ConsumerCoordinatorTest {
             List<Set<String>> capturedTopics = topicsCaptor.getAllValues();
 
             // expected the final group subscribed topics to be updated to "topic1" and "topic2"
-            Set<String> expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1, topic2));
+            Set<String> expectedTopicsGotCalled = Set.of(topic1, topic2);
             assertEquals(expectedTopicsGotCalled, capturedTopics.get(1));
         }
     }
@@ -1279,7 +1279,7 @@
         coordinator.poll(time.timer(Long.MAX_VALUE));
 
         // Make sure that the metadata was refreshed during the rebalance and thus subscriptions now contain two topics.
-        final Set<String> updatedSubscriptionSet = new HashSet<>(Arrays.asList(topic1, topic2));
+        final Set<String> updatedSubscriptionSet = Set.of(topic1, topic2);
         assertEquals(updatedSubscriptionSet, subscriptions.subscription());
 
         // Refresh the metadata again. Since there have been no changes since the last refresh, it won't trigger
@@ -1300,7 +1300,7 @@
             }
         }));
         coordinator.maybeUpdateSubscriptionMetadata();
-        assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), subscriptions.subscription());
+        assertEquals(Set.of(topic1, topic2), subscriptions.subscription());
 
         client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
         coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
@@ -2072,7 +2072,7 @@
         coordinator.poll(time.timer(Long.MAX_VALUE));
 
         assertFalse(coordinator.rejoinNeededOrPending());
-        assertEquals(new HashSet<>(Arrays.asList(tp1, tp2)), subscriptions.assignedPartitions());
+        assertEquals(Set.of(tp1, tp2), subscriptions.assignedPartitions());
     }
 
     /**
@@ -2264,7 +2264,7 @@
         // and join the group again
         rebalanceListener.revoked = null;
         rebalanceListener.assigned = null;
-        subscriptions.subscribe(new HashSet<>(Arrays.asList(topic1, otherTopic)), Optional.of(rebalanceListener));
+        subscriptions.subscribe(Set.of(topic1, otherTopic), Optional.of(rebalanceListener));
         client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
         client.prepareResponse(syncGroupResponse(assigned, Errors.NONE));
         coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java
index b3c74293231..9d6b0c2da88 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java
@@ -32,7 +32,6 @@
 import java.time.Duration;
 import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Properties;
 import java.util.Set;
 
@@ -204,6 +203,6 @@
      * This is a handy utility method for returning a set from a varargs array.
      */
     private static Set<TopicPartition> partitions(TopicPartition... partitions) {
-        return new HashSet<>(Arrays.asList(partitions));
+        return Set.of(partitions);
     }
 }
\ No newline at end of file
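Note (editor's illustration, not part of the patch): helpers like partitions(TopicPartition...) above now funnel through the Set.of(E...) varargs overload, which accepts the array directly but, unlike new HashSet<>(Arrays.asList(...)), throws on duplicate arguments. Current call sites pass distinct partitions, so behavior is unchanged; a sketch of the difference:

    import java.util.Set;
    import org.apache.kafka.common.TopicPartition;

    public class PartitionsHelperSketch {
        // Same shape as the test helper: the varargs array is passed straight to Set.of.
        static Set<TopicPartition> partitions(TopicPartition... partitions) {
            return Set.of(partitions);
        }

        public static void main(String[] args) {
            TopicPartition tp0 = new TopicPartition("topic", 0);
            System.out.println(partitions(tp0)); // fine: distinct elements
            // partitions(tp0, tp0);             // would throw IllegalArgumentException,
            //                                   // where the old HashSet version deduped silently
        }
    }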
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java
index 915c9ea9cfa..c2b4e6ca4c8 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java
@@ -53,7 +53,6 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.Properties;
@@ -721,7 +720,7 @@
      * This is a handy utility method for returning a set from a varargs array.
      */
     private static Set<TopicPartition> partitions(TopicPartition... partitions) {
-        return new HashSet<>(Arrays.asList(partitions));
+        return Set.of(partitions);
     }
 
     private void buildDependencies() {
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
index 7d0325e8e5d..0f83e28d9e4 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java
@@ -2479,7 +2479,7 @@
         assertTrue(fetchedRecords.containsKey(tp0));
         assertEquals(fetchedRecords.get(tp0).size(), 2);
         List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
-        Set<String> expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
+        Set<String> expectedCommittedKeys = Set.of("commit1-1", "commit1-2");
         Set<String> actuallyCommittedKeys = new HashSet<>();
         for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
             actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
@@ -2741,7 +2741,7 @@
     public void testConsumingViaIncrementalFetchRequests() {
         buildFetcher(2);
 
-        assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        assignFromUser(Set.of(tp0, tp1));
         subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
         subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
@@ -3196,7 +3196,7 @@
         // Setup so that tp0 & tp1 are subscribed and will be fetched from.
         // Also, setup client's metadata for tp0 & tp1.
-        subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        subscriptions.assignFromUser(Set.of(tp0, tp1));
         client.updateMetadata(
             RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4),
                 tp -> validLeaderEpoch, topicIds, false));
@@ -3289,7 +3289,7 @@
         // Setup so that tp0 & tp1 are subscribed and will be fetched from.
         // Also, setup client's metadata for tp0 & tp1.
-        subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        subscriptions.assignFromUser(Set.of(tp0, tp1));
         client.updateMetadata(
             RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4),
                 tp -> validLeaderEpoch, topicIds, false));
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
index a09024fb144..b85daebb8d8 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java
@@ -2466,7 +2466,7 @@
         assertTrue(fetchedRecords.containsKey(tp0));
         assertEquals(fetchedRecords.get(tp0).size(), 2);
         List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
-        Set<String> expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
+        Set<String> expectedCommittedKeys = Set.of("commit1-1", "commit1-2");
         Set<String> actuallyCommittedKeys = new HashSet<>();
         for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
             actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
@@ -2728,7 +2728,7 @@
     public void testConsumingViaIncrementalFetchRequests() {
         buildFetcher(2);
 
-        assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        assignFromUser(Set.of(tp0, tp1));
         subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
         subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
@@ -3473,7 +3473,7 @@
         // Setup so that tp0 & tp1 are subscribed and will be fetched from.
         // Also, setup client's metadata for tp0 & tp1.
-        subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        subscriptions.assignFromUser(Set.of(tp0, tp1));
         client.updateMetadata(
             RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4),
                 tp -> validLeaderEpoch, topicIds, false));
@@ -3566,7 +3566,7 @@
         // Setup so that tp0 & tp1 are subscribed and will be fetched from.
         // Also, setup client's metadata for tp0 & tp1.
-        subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        subscriptions.assignFromUser(Set.of(tp0, tp1));
         client.updateMetadata(
             RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4),
                 tp -> validLeaderEpoch, topicIds, false));
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java
index cfbf13a1dab..ed96b817900 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java
@@ -51,7 +51,6 @@
 import org.mockito.ArgumentCaptor;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -774,7 +773,7 @@
         // tp2 added to the assignment when the Offset Fetch request is already sent including tp1 only
         TopicPartition tp2 = new TopicPartition("topic2", 2);
-        Set<TopicPartition> initPartitions2 = new HashSet<>(Arrays.asList(tp1, tp2));
+        Set<TopicPartition> initPartitions2 = Set.of(tp1, tp2);
         mockAssignedPartitionsMissingPositions(initPartitions2, initPartitions2, leaderAndEpoch);
 
         // tp2 requires a position, but shouldn't be reset after receiving the offset fetch response that will only
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java
index f7039e838b7..2a06324f72a 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java
@@ -33,8 +33,6 @@
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
 import java.time.Duration;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Properties;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -181,6 +179,6 @@
      * This is a handy utility method for returning a set from a varargs array.
      */
     private static Set<TopicIdPartition> partitions(TopicIdPartition... partitions) {
-        return new HashSet<>(Arrays.asList(partitions));
+        return Set.of(partitions);
     }
 }
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java
index a83b0ae2d23..9607a0e9e20 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/StreamsRebalanceDataTest.java
@@ -90,9 +90,9 @@ public class StreamsRebalanceDataTest {
     @Test
     public void assignmentShouldNotBeModifiable() {
         final StreamsRebalanceData.Assignment assignment = new StreamsRebalanceData.Assignment(
-            new HashSet<>(Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 1))),
-            new HashSet<>(Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 2))),
-            new HashSet<>(Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 3)))
+            Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 1)),
+            Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 2)),
+            Set.of(new StreamsRebalanceData.TaskId("subtopologyId1", 3))
         );
 
         assertThrows(
@@ -220,8 +220,8 @@
     @Test
     public void subtopologyShouldNotBeModifiable() {
         final StreamsRebalanceData.Subtopology subtopology = new StreamsRebalanceData.Subtopology(
-            new HashSet<>(Set.of("sourceTopic1")),
-            new HashSet<>(Set.of("repartitionSinkTopic1")),
+            Set.of("sourceTopic1"),
+            Set.of("repartitionSinkTopic1"),
             Map.of("repartitionSourceTopic1", new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.of((short) 1), Map.of()))
                 .entrySet().stream()
                 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)),
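Note (editor's illustration, not part of the patch): in StreamsRebalanceDataTest above, new HashSet<>(Set.of(...)) wrapped an already-built set in a mutable copy that was never mutated, so passing Set.of(...) directly simply drops one allocation. Independent of Assignment's internals, the immutability the test asserts still holds, since Set.of itself rejects modification:

    import java.util.HashSet;
    import java.util.Set;

    public class CopyVsDirectSketch {
        public static void main(String[] args) {
            Set<String> copied = new HashSet<>(Set.of("task-1")); // extra, mutable copy
            Set<String> direct = Set.of("task-1");                // one allocation fewer
            System.out.println(copied.equals(direct));            // true: same contents
            // direct.add("task-2"); // would throw UnsupportedOperationException
        }
    }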
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java
index f697990b544..ff7937a835d 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java
@@ -33,10 +33,8 @@
 import org.apache.kafka.test.TestUtils;
 
 import org.junit.jupiter.api.Test;
 
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.Optional;
 import java.util.Set;
 import java.util.function.LongSupplier;
@@ -80,7 +78,7 @@ public class SubscriptionStateTest {
     @Test
     public void partitionAssignmentChangeOnTopicSubscription() {
-        state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        state.assignFromUser(Set.of(tp0, tp1));
         // assigned partitions should immediately change
         assertEquals(2, state.assignedPartitions().size());
         assertEquals(2, state.numAssignedPartitions());
@@ -394,7 +392,7 @@
     @Test
     public void patternSubscription() {
         state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener));
-        state.subscribeFromPattern(new HashSet<>(Arrays.asList(topic, topic1)));
+        state.subscribeFromPattern(Set.of(topic, topic1));
 
         assertEquals(2, state.subscription().size(), "Expected subscribed topics count is incorrect");
     }
@@ -434,7 +432,7 @@
     @Test
     public void unsubscribeUserAssignment() {
-        state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
+        state.assignFromUser(Set.of(tp0, tp1));
         state.unsubscribe();
         state.subscribe(singleton(topic), Optional.of(rebalanceListener));
         assertEquals(singleton(topic), state.subscription());
@@ -452,7 +450,7 @@
     @Test
     public void unsubscription() {
         state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener));
-        state.subscribeFromPattern(new HashSet<>(Arrays.asList(topic, topic1)));
+        state.subscribeFromPattern(Set.of(topic, topic1));
         assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1)));
         state.assignFromSubscribed(singleton(tp1));
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java
index 2913bcfad70..27315068e10 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java
@@ -22,8 +22,7 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Test;
 
-import java.util.Arrays;
-import java.util.HashSet;
+import java.util.Set;
 
 import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -48,14 +47,14 @@
     public void shouldMetricNames() {
         // create
         consumerMetrics = new AsyncConsumerMetrics(metrics);
-        HashSet<MetricName> expectedMetrics = new HashSet<>(Arrays.asList(
+        Set<MetricName> expectedMetrics = Set.of(
             metrics.metricName("last-poll-seconds-ago", CONSUMER_METRIC_GROUP),
             metrics.metricName("time-between-poll-avg", CONSUMER_METRIC_GROUP),
             metrics.metricName("time-between-poll-max", CONSUMER_METRIC_GROUP),
            metrics.metricName("poll-idle-ratio-avg", CONSUMER_METRIC_GROUP),
             metrics.metricName("commit-sync-time-ns-total", CONSUMER_METRIC_GROUP),
             metrics.metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP)
-        ));
+        );
         expectedMetrics.forEach(
             metricName -> assertTrue(
                 metrics.metrics().containsKey(metricName),
@@ -63,7 +62,7 @@
             )
         );
 
-        HashSet<MetricName> expectedConsumerMetrics = new HashSet<>(Arrays.asList(
+        Set<MetricName> expectedConsumerMetrics = Set.of(
             metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP),
             metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP),
             metrics.metricName("application-event-queue-size", CONSUMER_METRIC_GROUP),
@@ -79,7 +78,7 @@
             metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP),
             metrics.metricName("background-event-queue-processing-time-avg", CONSUMER_METRIC_GROUP),
             metrics.metricName("background-event-queue-processing-time-max", CONSUMER_METRIC_GROUP)
-        ));
+        );
         expectedConsumerMetrics.forEach(
             metricName -> assertTrue(
                 metrics.metrics().containsKey(metricName),
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java
index 64a1b41a14e..bec0eb2fcff 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java
@@ -29,10 +29,8 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Test;
 
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
@@ -257,14 +255,14 @@
         assertTrue(metadata.updateRequested());
         assertEquals(0, metadata.timeToNextUpdate(now));
-        assertEquals(metadata.topics(), new HashSet<>(Arrays.asList(topic1, topic2, topic3)));
-        assertEquals(metadata.newTopics(), new HashSet<>(Arrays.asList(topic2, topic3)));
+        assertEquals(metadata.topics(), Set.of(topic1, topic2, topic3));
+        assertEquals(metadata.newTopics(), Set.of(topic2, topic3));
 
         // Perform the partial update for a subset of the new topics.
         now += 1000;
         assertTrue(metadata.updateRequested());
         metadata.updateWithCurrentRequestVersion(responseWithTopics(Collections.singleton(topic2)), true, now);
-        assertEquals(metadata.topics(), new HashSet<>(Arrays.asList(topic1, topic2, topic3)));
+        assertEquals(metadata.topics(), Set.of(topic1, topic2, topic3));
         assertEquals(metadata.newTopics(), Collections.singleton(topic3));
     }
@@ -302,7 +300,7 @@
         // Perform the full update. This should clear the update request.
         now += 1000;
-        metadata.updateWithCurrentRequestVersion(responseWithTopics(new HashSet<>(Arrays.asList(topic1, topic2))), false, now);
+        metadata.updateWithCurrentRequestVersion(responseWithTopics(Set.of(topic1, topic2)), false, now);
         assertFalse(metadata.updateRequested());
     }
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java
index 7c2d791ea5c..ce01460e6ed 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java
@@ -59,7 +59,6 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Deque;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -167,7 +166,7 @@
         accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster);
 
         // drain batches from 2 nodes: node1 => tp1, node2 => tp3, because the max request size is full after the first batch drained
-        Map<Integer, List<ProducerBatch>> batches1 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0);
+        Map<Integer, List<ProducerBatch>> batches1 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0);
         verifyTopicPartitionInBatches(batches1, tp1, tp3);
 
         // add record for tp1, tp3
@@ -176,11 +175,11 @@
         // drain batches from 2 nodes: node1 => tp2, node2 => tp4, because the max request size is full after the first batch drained
         // The drain index should start from next topic partition, that is, node1 => tp2, node2 => tp4
-        Map<Integer, List<ProducerBatch>> batches2 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0);
+        Map<Integer, List<ProducerBatch>> batches2 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0);
         verifyTopicPartitionInBatches(batches2, tp2, tp4);
 
         // make sure in next run, the drain index will start from the beginning
-        Map<Integer, List<ProducerBatch>> batches3 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0);
+        Map<Integer, List<ProducerBatch>> batches3 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0);
         verifyTopicPartitionInBatches(batches3, tp1, tp3);
 
         // add record for tp2, tp3, tp4 and mute the tp4
@@ -189,7 +188,7 @@
         accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster);
         accum.mutePartition(tp4);
         // drain batches from 2 nodes: node1 => tp2, node2 => tp3 (because tp4 is muted)
-        Map<Integer, List<ProducerBatch>> batches4 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0);
+        Map<Integer, List<ProducerBatch>> batches4 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0);
         verifyTopicPartitionInBatches(batches4, tp2, tp3);
 
         // add record for tp1, tp2, tp3, and unmute tp4
@@ -198,7 +197,7 @@
         accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster);
         accum.unmutePartition(tp4);
         // set maxSize as a max value, so that the all partitions in 2 nodes should be drained: node1 => [tp1, tp2], node2 => [tp3, tp4]
-        Map<Integer, List<ProducerBatch>> batches5 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), Integer.MAX_VALUE, 0);
+        Map<Integer, List<ProducerBatch>> batches5 = accum.drain(metadataCache, Set.of(node1, node2), Integer.MAX_VALUE, 0);
         verifyTopicPartitionInBatches(batches5, tp1, tp2, tp3, tp4);
     }
@@ -1430,7 +1429,7 @@
         // Try to drain from node1, it should return no batches.
         Map<Integer, List<ProducerBatch>> batches = accum.drain(metadataCache,
-            new HashSet<>(Collections.singletonList(node1)), 999999 /* maxSize */, now);
+            Set.of(node1), 999999 /* maxSize */, now);
         assertTrue(batches.containsKey(node1.id()) && batches.get(node1.id()).isEmpty(),
             "No batches ready to be drained on Node1");
     }
@@ -1511,7 +1510,7 @@
         // Drain for node2, it should return 0 batches,
         Map<Integer, List<ProducerBatch>> batches = accum.drain(metadataCache,
-            new HashSet<>(Collections.singletonList(node2)), 999999 /* maxSize */, time.milliseconds());
+            Set.of(node2), 999999 /* maxSize */, time.milliseconds());
         assertTrue(batches.get(node2.id()).isEmpty());
     }
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java
index f9f94af1806..6b2d50a52cc 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java
@@ -3313,8 +3313,8 @@
         int tp0LeaderEpoch = 100;
         int epoch = tp0LeaderEpoch;
         this.client.updateMetadata(
-            RequestTestUtils.metadataUpdateWithIds(1, new HashSet<>(Arrays.asList(new TopicIdPartition(TOPIC_ID, tp0),
-                new TopicIdPartition(TOPIC_ID, tp1))),
+            RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0),
+                new TopicIdPartition(TOPIC_ID, tp1)),
                 tp -> {
                     if (tp0.equals(tp)) {
                         return epoch;
@@ -3341,8 +3341,8 @@
         // Update leader epoch for tp0
         int newEpoch = ++tp0LeaderEpoch;
         this.client.updateMetadata(
-            RequestTestUtils.metadataUpdateWithIds(1, new HashSet<>(Arrays.asList(new TopicIdPartition(TOPIC_ID, tp0),
-                new TopicIdPartition(TOPIC_ID, tp1))),
+            RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0),
+                new TopicIdPartition(TOPIC_ID, tp1)),
                 tp -> {
                     if (tp0.equals(tp)) {
                         return newEpoch;
@@ -3429,8 +3429,8 @@
         int tp1LeaderEpoch = 200;
         int tp2LeaderEpoch = 300;
         this.client.updateMetadata(
-            RequestTestUtils.metadataUpdateWithIds(1, new HashSet<>(Arrays.asList(new TopicIdPartition(TOPIC_ID, tp0),
-                new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2))),
+            RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0),
+                new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2)),
                 tp -> {
                     if (tp0.equals(tp)) {
                         return tp0LeaderEpoch;
@@ -3509,8 +3509,8 @@
         int tp1LeaderEpoch = 200;
         int tp2LeaderEpoch = 300;
         this.client.updateMetadata(
-            RequestTestUtils.metadataUpdateWithIds(1, new HashSet<>(Arrays.asList(new TopicIdPartition(TOPIC_ID, tp0),
-                new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2))),
+            RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0),
+                new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2)),
                 tp -> {
                     if (tp0.equals(tp)) {
                         return tp0LeaderEpoch;
diff --git a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java
index 6e1f0e23242..65bd1c53634 100644
--- a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java
@@ -416,7 +416,7 @@
             .define("a", Type.STRING, Importance.LOW, "docs")
             .define("b", Type.STRING, Importance.LOW, "docs");
         Set<String> names = configDef.names();
-        assertEquals(new HashSet<>(Arrays.asList("a", "b")), names);
+        assertEquals(Set.of("a", "b"), names);
         // should be unmodifiable
         try {
             names.add("new");
@@ -439,13 +439,13 @@
         // Creating a ConfigDef based on another should compute the correct number of configs with no parent, even
         // if the base ConfigDef has already computed its parentless configs
         final ConfigDef baseConfigDef = new ConfigDef().define("a", Type.STRING, Importance.LOW, "docs");
-        assertEquals(new HashSet<>(singletonList("a")), baseConfigDef.getConfigsWithNoParent());
+        assertEquals(Set.of("a"), baseConfigDef.getConfigsWithNoParent());
 
         final ConfigDef configDef = new ConfigDef(baseConfigDef)
             .define("parent", Type.STRING, Importance.HIGH, "parent docs", "group", 1, Width.LONG, "Parent", singletonList("child"))
             .define("child", Type.STRING, Importance.HIGH, "docs");
 
-        assertEquals(new HashSet<>(Arrays.asList("a", "parent")), configDef.getConfigsWithNoParent());
+        assertEquals(Set.of("a", "parent"), configDef.getConfigsWithNoParent());
     }
diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java
index bbd2268e7cb..9a31a63915d 100644
--- a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java
@@ -22,10 +22,8 @@
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
@@ -88,7 +86,7 @@
     @Test
     void testGetEnvVarsByKeyList() {
-        Set<String> keyList = new HashSet<>(Arrays.asList("test_var1", "secret_var2"));
+        Set<String> keyList = Set.of("test_var1", "secret_var2");
         Set<String> keys = envVarConfigProvider.get(null, keyList).data().keySet();
         assertEquals(keyList, keys);
     }
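Note (editor's illustration, not part of the patch): the assertion-side rewrites above are behavior-preserving because java.util.Set equality is defined by size and containment only — the implementation class and iteration order never influence assertEquals on two sets:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class SetEqualitySketch {
        public static void main(String[] args) {
            // Different implementations, different construction order, still equal.
            System.out.println(Set.of("a", "b").equals(new HashSet<>(List.of("b", "a")))); // true
        }
    }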
a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java index 097a14366d8..abbe2ef28f9 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java @@ -31,6 +31,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; @@ -86,7 +87,7 @@ public class OAuthBearerUnsecuredLoginCallbackHandlerTest { assertNotNull(jws, "create token failed"); long startMs = mockTime.milliseconds(); confirmCorrectValues(jws, user, startMs, 1000 * 60 * 60); - assertEquals(new HashSet<>(Arrays.asList("sub", "iat", "exp")), jws.claims().keySet()); + assertEquals(Set.of("sub", "iat", "exp"), jws.claims().keySet()); } @SuppressWarnings("unchecked") @@ -123,11 +124,11 @@ public class OAuthBearerUnsecuredLoginCallbackHandlerTest { long startMs = mockTime.milliseconds(); confirmCorrectValues(jws, user, startMs, lifetimeSeconds * 1000); Map claims = jws.claims(); - assertEquals(new HashSet<>(Arrays.asList(actualScopeClaimName, principalClaimName, "iat", "exp", "number", - "list", "emptyList1", "emptyList2")), claims.keySet()); - assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), + assertEquals(Set.of(actualScopeClaimName, principalClaimName, "iat", "exp", "number", + "list", "emptyList1", "emptyList2"), claims.keySet()); + assertEquals(Set.of(explicitScope1, explicitScope2), new HashSet<>((List) claims.get(actualScopeClaimName))); - assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), jws.scope()); + assertEquals(Set.of(explicitScope1, explicitScope2), jws.scope()); assertEquals(1.0, jws.claim("number", Number.class)); assertEquals(Arrays.asList("1", "2", ""), jws.claim("list", List.class)); assertEquals(Collections.emptyList(), jws.claim("emptyList1", List.class)); diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java index d404bdc7dc1..5c1b0ee4540 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java @@ -26,9 +26,7 @@ import org.slf4j.LoggerFactory; import java.nio.charset.StandardCharsets; import java.security.Principal; import java.util.ArrayList; -import java.util.Arrays; import java.util.Base64; -import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -54,10 +52,10 @@ import jakarta.ws.rs.core.SecurityContext; public class JaasBasicAuthFilter implements ContainerRequestFilter { private static final Logger log = LoggerFactory.getLogger(JaasBasicAuthFilter.class); - private static final Set INTERNAL_REQUEST_MATCHERS = new HashSet<>(Arrays.asList( + private static final Set INTERNAL_REQUEST_MATCHERS = 
Set.of( new RequestMatcher(HttpMethod.POST, "/?connectors/([^/]+)/tasks/?"), new RequestMatcher(HttpMethod.PUT, "/?connectors/[^/]+/fence/?") - )); + ); private static final String CONNECT_LOGIN_MODULE = "KafkaConnect"; static final String AUTHORIZATION = "Authorization"; diff --git a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java index d79c8527b3c..c4a5356d59f 100644 --- a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java +++ b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java @@ -626,8 +626,8 @@ public class JsonConverterTest { Set payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); - assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add(1).add(12), - JsonNodeFactory.instance.arrayNode().add(2).add(15))), + assertEquals(Set.of(JsonNodeFactory.instance.arrayNode().add(1).add(12), + JsonNodeFactory.instance.arrayNode().add(2).add(15)), payloadEntries ); } @@ -805,9 +805,9 @@ public class JsonConverterTest { Set payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); - assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add("string").add(12), + assertEquals(Set.of(JsonNodeFactory.instance.arrayNode().add("string").add(12), JsonNodeFactory.instance.arrayNode().add(52).add("string"), - JsonNodeFactory.instance.arrayNode().add(false).add(true))), + JsonNodeFactory.instance.arrayNode().add(false).add(true)), payloadEntries ); } diff --git a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java index be728a0ebe9..f7dbac5ad97 100644 --- a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java +++ b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java @@ -97,8 +97,8 @@ public class MirrorClientTest { MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", "source1.heartbeats", "source2.source1.heartbeats", "source3.heartbeats")); Set heartbeatTopics = client.heartbeatTopics(); - assertEquals(heartbeatTopics, new HashSet<>(Arrays.asList("heartbeats", "source1.heartbeats", - "source2.source1.heartbeats", "source3.heartbeats"))); + assertEquals(heartbeatTopics, Set.of("heartbeats", "source1.heartbeats", + "source2.source1.heartbeats", "source3.heartbeats")); } @Test @@ -106,8 +106,8 @@ public class MirrorClientTest { MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "checkpoints.internal", "source1.checkpoints.internal", "source2.source1.checkpoints.internal", "source3.checkpoints.internal")); Set checkpointTopics = client.checkpointTopics(); - assertEquals(new HashSet<>(Arrays.asList("source1.checkpoints.internal", - "source2.source1.checkpoints.internal", "source3.checkpoints.internal")), checkpointTopics); + assertEquals(Set.of("source1.checkpoints.internal", + "source2.source1.checkpoints.internal", "source3.checkpoints.internal"), checkpointTopics); } @Test diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java index 8ace7d1fc3b..71e3edebf5b 100644 --- 
diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java
index 8ace7d1fc3b..71e3edebf5b 100644
--- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java
+++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java
@@ -26,9 +26,7 @@ import org.apache.kafka.common.metrics.stats.Max;
 import org.apache.kafka.common.metrics.stats.Min;
 import org.apache.kafka.common.metrics.stats.Value;

-import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
@@ -38,7 +36,7 @@ class MirrorCheckpointMetrics implements AutoCloseable {

     private static final String CHECKPOINT_CONNECTOR_GROUP = MirrorCheckpointConnector.class.getSimpleName();

-    private static final Set<String> GROUP_TAGS = new HashSet<>(Arrays.asList("source", "target", "group", "topic", "partition"));
+    private static final Set<String> GROUP_TAGS = Set.of("source", "target", "group", "topic", "partition");

     private static final MetricNameTemplate CHECKPOINT_LATENCY = new MetricNameTemplate(
             "checkpoint-latency-ms", CHECKPOINT_CONNECTOR_GROUP,
diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java
index 7e33967c9f1..c297c4c5fcf 100644
--- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java
+++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java
@@ -27,8 +27,6 @@ import org.apache.kafka.common.metrics.stats.Meter;
 import org.apache.kafka.common.metrics.stats.Min;
 import org.apache.kafka.common.metrics.stats.Value;

-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
@@ -62,7 +60,7 @@ class MirrorSourceMetrics implements AutoCloseable {
         this.source = taskConfig.sourceClusterAlias();
         this.metrics = new Metrics();

-        Set<String> partitionTags = new HashSet<>(Arrays.asList("source", "target", "topic", "partition"));
+        Set<String> partitionTags = Set.of("source", "target", "topic", "partition");

         recordCount = new MetricNameTemplate(
                 "record-count", SOURCE_CONNECTOR_GROUP,
diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java
index d8322fe2240..4a676855378 100644
--- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java
+++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java
@@ -35,9 +35,7 @@ import org.apache.kafka.connect.storage.OffsetStorageReader;
 import org.junit.jupiter.api.Test;

 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -224,14 +222,14 @@ public class MirrorSourceTaskTest {
         OffsetStorageReader mockOffsetStorageReader = mock(OffsetStorageReader.class);
         when(mockSourceTaskContext.offsetStorageReader()).thenReturn(mockOffsetStorageReader);

-        Set<TopicPartition> topicPartitions = new HashSet<>(Arrays.asList(
+        Set<TopicPartition> topicPartitions = Set.of(
                 new TopicPartition("previouslyReplicatedTopic", 8),
                 new TopicPartition("previouslyReplicatedTopic1", 0),
                 new TopicPartition("previouslyReplicatedTopic", 1),
                 new TopicPartition("newTopicToReplicate1", 1),
                 new TopicPartition("newTopicToReplicate1", 4),
                 new TopicPartition("newTopicToReplicate2", 0)
-        ));
+        );

         long arbitraryCommittedOffset = 4L;
         long offsetToSeek = arbitraryCommittedOffset + 1L;
diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java
index b278285e606..a83189757c8 100644
--- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java
+++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java
@@ -1101,7 +1101,7 @@ public class MirrorConnectorsIntegrationBaseTest {
     }

     protected static void alterMirrorMakerSourceConnectorOffsets(EmbeddedConnectCluster connectCluster, LongUnaryOperator alterOffset, String... topics) {
-        Set<String> topicsSet = new HashSet<>(Arrays.asList(topics));
+        Set<String> topicsSet = Set.of(topics);

         String connectorName = MirrorSourceConnector.class.getSimpleName();
         ConnectorOffsets currentOffsets = connectCluster.connectorOffsets(connectorName);
@@ -1131,7 +1131,7 @@ public class MirrorConnectorsIntegrationBaseTest {
     }

     protected static void resetSomeMirrorMakerSourceConnectorOffsets(EmbeddedConnectCluster connectCluster, String... topics) {
-        Set<String> topicsSet = new HashSet<>(Arrays.asList(topics));
+        Set<String> topicsSet = Set.of(topics);

         String connectorName = MirrorSourceConnector.class.getSimpleName();
         ConnectorOffsets currentOffsets = connectCluster.connectorOffsets(connectorName);
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java
index 97094bc89c8..130db0ab61c 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java
@@ -45,7 +45,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -271,7 +270,7 @@ public class Plugins {
     public String pluginVersion(String classOrAlias, ClassLoader sourceLoader, PluginType... allowedTypes) {
         String location = (sourceLoader instanceof PluginClassLoader) ? ((PluginClassLoader) sourceLoader).location() : null;
-        PluginDesc<?> desc = delegatingLoader.pluginDesc(classOrAlias, location, new HashSet<>(Arrays.asList(allowedTypes)));
+        PluginDesc<?> desc = delegatingLoader.pluginDesc(classOrAlias, location, Set.of(allowedTypes));
         if (desc != null) {
             return desc.version();
         }
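Note on varargs call sites such as Plugins#pluginVersion above (and the String... helpers later in this patch): Set.of(array) throws IllegalArgumentException on duplicate arguments, whereas new HashSet<>(Arrays.asList(array)) silently deduplicated, so these conversions assume callers never pass the same element twice. A small sketch of the behavioral difference (class name and values are illustrative only):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class DuplicateVarargsDemo {
        public static void main(String[] args) {
            // Legacy pattern: duplicates collapse silently.
            Set<String> lenient = new HashSet<>(Arrays.asList("a", "a"));
            System.out.println(lenient.size()); // 1

            // Set.of: duplicates are rejected eagerly.
            try {
                Set.of("a", "a");
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e); // duplicate element: a
            }
        }
    }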
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
index 539960badec..abde7340e8d 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
@@ -483,7 +483,7 @@ public class WorkerSinkTaskTest {
         workerTask.initializeAndStart();
         verifyInitializeTask();

-        Set<TopicPartition> newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3));
+        Set<TopicPartition> newAssignment = Set.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);

         when(consumer.assignment())
                 .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT)
@@ -638,8 +638,8 @@ public class WorkerSinkTaskTest {
                 .thenReturn(INITIAL_ASSIGNMENT)
                 .thenReturn(Collections.singleton(TOPIC_PARTITION2))
                 .thenReturn(Collections.singleton(TOPIC_PARTITION2))
-                .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3)))
-                .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3)))
+                .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3))
+                .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3))
                 .thenReturn(INITIAL_ASSIGNMENT)
                 .thenReturn(INITIAL_ASSIGNMENT)
                 .thenReturn(INITIAL_ASSIGNMENT);
@@ -710,12 +710,12 @@ public class WorkerSinkTaskTest {

         when(consumer.assignment())
                 .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT)
-                .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2)))
-                .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2)))
-                .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2)))
-                .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3)))
-                .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3)))
-                .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3)));
+                .thenReturn(Set.of(TOPIC_PARTITION2))
+                .thenReturn(Set.of(TOPIC_PARTITION2))
+                .thenReturn(Set.of(TOPIC_PARTITION2))
+                .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3))
+                .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3))
+                .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3));

         INITIAL_ASSIGNMENT.forEach(tp -> when(consumer.position(tp)).thenReturn(FIRST_OFFSET));
         when(consumer.position(TOPIC_PARTITION3)).thenReturn(FIRST_OFFSET);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
index 6c2c593c35b..0693f06dfe4 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
@@ -65,7 +65,6 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -111,8 +110,8 @@ public class WorkerSinkTaskThreadedTest {
     private static final TopicPartition TOPIC_PARTITION2 = new TopicPartition(TOPIC, PARTITION2);
     private static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3);
     private static final TopicPartition UNASSIGNED_TOPIC_PARTITION = new TopicPartition(TOPIC, 200);
-    private static final Set<TopicPartition> INITIAL_ASSIGNMENT = new HashSet<>(Arrays.asList(
-            TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3));
+    private static final Set<TopicPartition> INITIAL_ASSIGNMENT = Set.of(
+            TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);

     private static final Map<String, String> TASK_PROPS = new HashMap<>();
     private static final long TIMESTAMP = 42L;
@@ -439,7 +438,7 @@ public class WorkerSinkTaskThreadedTest {
         doAnswer(invocation -> {
             return null; // initial assignment
         }).doAnswer(invocation -> {
-            assertEquals(new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)), sinkTaskContext.getValue().assignment());
+            assertEquals(Set.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3), sinkTaskContext.getValue().assignment());
             return null;
         }).doAnswer(invocation -> {
             try {
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
index 18589d66855..aad21f3509c 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
@@ -1031,7 +1031,7 @@ public class DistributedHerderTest {
         // tasks are revoked
         TopicStatus fooStatus = new TopicStatus(FOO_TOPIC, CONN1, 0, time.milliseconds());
         TopicStatus barStatus = new TopicStatus(BAR_TOPIC, CONN1, 0, time.milliseconds());
-        when(statusBackingStore.getAllTopics(eq(CONN1))).thenReturn(new HashSet<>(Arrays.asList(fooStatus, barStatus)));
+        when(statusBackingStore.getAllTopics(eq(CONN1))).thenReturn(Set.of(fooStatus, barStatus));
         doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(FOO_TOPIC));
         doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(BAR_TOPIC));

@@ -3232,7 +3232,7 @@ public class DistributedHerderTest {
         taskConfigGenerations.put(CONN1, 3);
         taskConfigGenerations.put(CONN2, 4);
         taskConfigGenerations.put(conn3, 2);
-        Set<String> pendingFencing = new HashSet<>(Arrays.asList(CONN1, CONN2, conn3));
+        Set<String> pendingFencing = Set.of(CONN1, CONN2, conn3);
         ClusterConfigState configState = exactlyOnceSnapshot(
                 sessionKey,
                 TASK_CONFIGS_MAP,
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java
index 86bc897fafe..8e10a07a015 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java
@@ -923,7 +923,7 @@ public class IncrementalCooperativeAssignorTest {
         assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(),
                 new ArrayList<>(configuredAssignment.values()));

-        Set<String> expectedWorkers = new HashSet<>(Arrays.asList(newWorker, flakyWorker));
+        Set<String> expectedWorkers = Set.of(newWorker, flakyWorker);
         assertEquals(expectedWorkers,
                 assignor.candidateWorkersForReassignment,
                 "Wrong set of workers for reassignments");
@@ -1455,7 +1455,7 @@ public class IncrementalCooperativeAssignorTest {
     }

     private void assertWorkers(String... workers) {
-        assertEquals(new HashSet<>(Arrays.asList(workers)), returnedAssignments.allWorkers(), "Wrong set of workers");
+        assertEquals(Set.of(workers), returnedAssignments.allWorkers(), "Wrong set of workers");
     }

     /**
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java
index e38cd2da60d..d63abc8dd45 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java
@@ -200,7 +200,7 @@ public class ConnectorsResourceTest {
         Collection<String> connectors = (Collection<String>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity();
         // Ordering isn't guaranteed, compare sets
-        assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), new HashSet<>(connectors));
+        assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), new HashSet<>(connectors));
     }

     @Test
@@ -218,7 +218,7 @@ public class ConnectorsResourceTest {
         Map<String, Map<String, Object>> expanded = (Map<String, Map<String, Object>>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity();
         // Ordering isn't guaranteed, compare sets
-        assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet());
+        assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet());
         assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status"));
         assertEquals(connector, expanded.get(CONNECTOR_NAME).get("status"));
     }
@@ -238,7 +238,7 @@ public class ConnectorsResourceTest {
         Map<String, Map<String, Object>> expanded = (Map<String, Map<String, Object>>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity();
         // Ordering isn't guaranteed, compare sets
-        assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet());
+        assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet());
         assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("info"));
         assertEquals(connector, expanded.get(CONNECTOR_NAME).get("info"));
     }
@@ -262,7 +262,7 @@ public class ConnectorsResourceTest {
         Map<String, Map<String, Object>> expanded = (Map<String, Map<String, Object>>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity();
         // Ordering isn't guaranteed, compare sets
-        assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet());
+        assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet());
         assertEquals(connectorInfo2, expanded.get(CONNECTOR2_NAME).get("info"));
         assertEquals(connectorInfo, expanded.get(CONNECTOR_NAME).get("info"));
         assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status"));
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java
index 83d9e953478..df7b7f17401 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java
@@ -35,9 +35,9 @@ import org.mockito.junit.jupiter.MockitoExtension;
 import org.mockito.junit.jupiter.MockitoSettings;
 import org.mockito.quality.Strictness;

-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;

 import static org.apache.kafka.connect.json.JsonConverterConfig.SCHEMAS_ENABLE_CONFIG;
@@ -277,7 +277,7 @@ public class KafkaStatusBackingStoreFormatTest {
         assertEquals(secondTopicStatus, store.parseTopicStatus(valueCaptor.getValue()));
         assertEquals(firstTopicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC));
         assertEquals(secondTopicStatus, store.getTopic(FOO_CONNECTOR, BAR_TOPIC));
-        assertEquals(new HashSet<>(Arrays.asList(firstTopicStatus, secondTopicStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
+        assertEquals(Set.of(firstTopicStatus, secondTopicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR)));
     }
 }
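Note on the "Ordering isn't guaranteed, compare sets" assertions in ConnectorsResourceTest above: the iteration order of Set.of is unspecified and may differ from one JVM run to the next, which is why these tests compare whole sets rather than ordered views, and why the commit deliberately leaves order-sensitive call sites on HashSet/LinkedHashSet. A minimal sketch (names are illustrative only):

    import java.util.Set;

    public class IterationOrderDemo {
        public static void main(String[] args) {
            // The printed element order is unspecified and can vary across runs,
            // so only whole-set comparisons are safe in assertions.
            Set<String> connectors = Set.of("connector-1", "connector-2", "connector-3");
            System.out.println(connectors);
        }
    }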
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java
index a9ac5f483be..108dbbc45c3 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java
@@ -43,11 +43,11 @@ import org.mockito.junit.jupiter.MockitoExtension;
 import org.mockito.junit.jupiter.MockitoSettings;
 import org.mockito.quality.Strictness;

-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.function.Supplier;

 import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG;
@@ -388,8 +388,8 @@ public class KafkaStatusBackingStoreTest {
         verify(kafkaBasedLog).send(eq("status-connector-" + CONNECTOR), eq(value), any(Callback.class));
         verify(kafkaBasedLog).send(eq("status-task-conn-0"), eq(value), any(Callback.class));

-        assertEquals(new HashSet<>(Collections.singletonList(CONNECTOR)), store.connectors());
-        assertEquals(new HashSet<>(Collections.singletonList(taskStatus)), new HashSet<>(store.getAll(CONNECTOR)));
+        assertEquals(Set.of(CONNECTOR), store.connectors());
+        assertEquals(Set.of(taskStatus), new HashSet<>(store.getAll(CONNECTOR)));
         store.read(consumerRecord(0, "status-connector-conn", null));
         assertTrue(store.connectors().isEmpty());
         assertTrue(store.getAll(CONNECTOR).isEmpty());
@@ -412,7 +412,7 @@ public class KafkaStatusBackingStoreTest {

         verify(kafkaBasedLog).send(eq("status-task-conn-0"), eq(value), any(Callback.class));

-        assertEquals(new HashSet<>(Collections.singletonList(taskStatus)), new HashSet<>(store.getAll(CONNECTOR)));
+        assertEquals(Set.of(taskStatus), new HashSet<>(store.getAll(CONNECTOR)));
         store.read(consumerRecord(0, "status-task-conn-0", null));
         assertTrue(store.getAll(CONNECTOR).isEmpty());
     }
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
index aabf894e1ea..eb37641a064 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java
@@ -51,7 +51,6 @@ import org.mockito.quality.Strictness;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -100,7 +99,7 @@ public class KafkaBasedLogTest {
         CONSUMER_PROPS.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
     }

-    private static final Set<TopicPartition> CONSUMER_ASSIGNMENT = new HashSet<>(Arrays.asList(TP0, TP1));
+    private static final Set<TopicPartition> CONSUMER_ASSIGNMENT = Set.of(TP0, TP1);
     private static final Map<String, String> FIRST_SET = new HashMap<>();
     static {
         FIRST_SET.put("key", "value");
@@ -408,7 +407,7 @@ public class KafkaBasedLogTest {
     @Test
     public void testOffsetReadFailureWhenWorkThreadFails() throws Exception {
         RuntimeException exception = new RuntimeException();
-        Set<TopicPartition> tps = new HashSet<>(Arrays.asList(TP0, TP1));
+        Set<TopicPartition> tps = Set.of(TP0, TP1);
         Map<TopicPartition, Long> endOffsets = new HashMap<>();
         endOffsets.put(TP0, 0L);
         endOffsets.put(TP1, 0L);
@@ -482,7 +481,7 @@ public class KafkaBasedLogTest {

     @Test
     public void testReadEndOffsetsUsingAdmin() {
-        Set<TopicPartition> tps = new HashSet<>(Arrays.asList(TP0, TP1));
+        Set<TopicPartition> tps = Set.of(TP0, TP1);
         Map<TopicPartition, Long> endOffsets = new HashMap<>();
         endOffsets.put(TP0, 0L);
         endOffsets.put(TP1, 0L);
@@ -498,7 +497,7 @@ public class KafkaBasedLogTest {

     @Test
     public void testReadEndOffsetsUsingAdminThatFailsWithUnsupported() {
-        Set<TopicPartition> tps = new HashSet<>(Arrays.asList(TP0, TP1));
+        Set<TopicPartition> tps = Set.of(TP0, TP1);
         admin = mock(TopicAdmin.class);
         // Getting end offsets using the admin client should fail with unsupported version
         when(admin.retryEndOffsets(eq(tps), any(), anyLong())).thenThrow(new UnsupportedVersionException("too old"));
@@ -516,7 +515,7 @@ public class KafkaBasedLogTest {

     @Test
     public void testReadEndOffsetsUsingAdminThatFailsWithRetriable() {
-        Set<TopicPartition> tps = new HashSet<>(Arrays.asList(TP0, TP1));
+        Set<TopicPartition> tps = Set.of(TP0, TP1);
         Map<TopicPartition, Long> endOffsets = new HashMap<>();
         endOffsets.put(TP0, 0L);
         endOffsets.put(TP1, 0L);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java
index 1f25dd15f51..374d483c060 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java
@@ -65,7 +65,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -667,7 +666,7 @@ public class TopicAdminTest {
         String topicName = "myTopic";
         TopicPartition tp1 = new TopicPartition(topicName, 0);
         TopicPartition tp2 = new TopicPartition(topicName, 1);
-        Set<TopicPartition> tps = new HashSet<>(Arrays.asList(tp1, tp2));
+        Set<TopicPartition> tps = Set.of(tp1, tp2);
         long offset1 = 1001;
         long offset2 = 1002;
         Cluster cluster = createCluster(1, topicName, 2);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
index ca358f18f43..5583d292b93 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
@@ -33,12 +33,11 @@ import org.apache.kafka.connect.transforms.RegexRouter;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;

-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;

 import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_COMPACT;
@@ -136,7 +135,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertEquals(topicCreation.defaultTopicGroup(), groups.get(DEFAULT_TOPIC_CREATION_GROUP));
         assertEquals(2, topicCreation.topicGroups().size());
-        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet());
+        assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet());
         assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -300,7 +299,7 @@ public class TopicCreationTest {
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(2, groups.size());
-        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP)), groups.keySet());
+        assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP), groups.keySet());

         // verify topic creation
         TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups);
@@ -365,7 +364,7 @@ public class TopicCreationTest {
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(2, groups.size());
-        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP)), groups.keySet());
+        assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP), groups.keySet());

         // verify topic creation
         TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups);
@@ -442,7 +441,7 @@ public class TopicCreationTest {
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(3, groups.size());
-        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP)), groups.keySet());
+        assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP), groups.keySet());

         // verify topic creation
         TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups);
@@ -466,7 +465,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC));
         assertEquals(2, topicCreation.topicGroups().size());
-        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet());
+        assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet());
         assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC));
         assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
@@ -514,7 +513,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertEquals(groups.get(DEFAULT_TOPIC_CREATION_GROUP), topicCreation.defaultTopicGroup());
         assertEquals(2, topicCreation.topicGroups().size());
-        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet());
+        assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet());
         assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
@@ -574,7 +573,7 @@ public class TopicCreationTest {
         // verify topic creation group is instantiated correctly
         Map<String, TopicCreationGroup> groups = TopicCreationGroup.configuredGroups(sourceConfig);
         assertEquals(3, groups.size());
-        assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP)), groups.keySet());
+        assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP), groups.keySet());

         // verify topic creation
         TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups);
@@ -598,7 +597,7 @@ public class TopicCreationTest {
         assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC));
         assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC));
         assertEquals(2, topicCreation.topicGroups().size());
-        assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet());
+        assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet());
         assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC));
         assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC));
         topicCreation.addTopic(FOO_TOPIC);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java
index 8dc22edb863..c901361cb64 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java
@@ -25,7 +25,6 @@ import org.apache.kafka.connect.runtime.rest.errors.ConnectRestException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Map;
@@ -147,7 +146,7 @@ public class ConnectAssertions {
      * @param topicNames the names of the topics that are expected to not exist
      */
     public void assertTopicsDoNotExist(String... topicNames) throws InterruptedException {
-        Set<String> topicNameSet = new HashSet<>(Arrays.asList(topicNames));
+        Set<String> topicNameSet = Set.of(topicNames);
         AtomicReference<Set<String>> existingTopics = new AtomicReference<>(topicNameSet);
         waitForCondition(
             () -> checkTopicsExist(topicNameSet, (actual, expected) -> {
@@ -164,7 +163,7 @@ public class ConnectAssertions {
      * @param topicNames the names of the topics that are expected to exist
      */
     public void assertTopicsExist(String... topicNames) throws InterruptedException {
-        Set<String> topicNameSet = new HashSet<>(Arrays.asList(topicNames));
+        Set<String> topicNameSet = Set.of(topicNames);
         AtomicReference<Set<String>> missingTopics = new AtomicReference<>(topicNameSet);
         waitForCondition(
             () -> checkTopicsExist(topicNameSet, (actual, expected) -> {
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java
index 5d075ab75e0..8f2fb1aed7d 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java
@@ -65,7 +65,6 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -282,7 +281,7 @@ public class EmbeddedKafkaCluster {
      * @return the map of optional {@link TopicDescription} keyed by the topic name
      */
    public Map<String, Optional<TopicDescription>> describeTopics(String... topicNames) {
-        return describeTopics(new HashSet<>(Arrays.asList(topicNames)));
+        return describeTopics(Set.of(topicNames));
     }

     /**
diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java
index ed6d2697634..04ba1264ee5 100644
--- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java
+++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java
@@ -27,8 +27,7 @@ import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;

-import java.util.Arrays;
-import java.util.HashSet;
+import java.util.Set;
 import java.util.stream.IntStream;

 import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.BATCH_FLUSH_TIME_METRIC_NAME;
@@ -49,7 +48,7 @@ public class CoordinatorRuntimeMetricsImplTest {
     public void testMetricNames() {
         Metrics metrics = new Metrics();

-        HashSet<MetricName> expectedMetrics = new HashSet<>(Arrays.asList(
+        Set<MetricName> expectedMetrics = Set.of(
             kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "loading"),
             kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "active"),
             kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "failed"),
@@ -77,7 +76,7 @@ public class CoordinatorRuntimeMetricsImplTest {
             kafkaMetricName(metrics, "batch-flush-time-ms-p95"),
             kafkaMetricName(metrics, "batch-flush-time-ms-p99"),
             kafkaMetricName(metrics, "batch-flush-time-ms-p999")
-        ));
+        );

         try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP)) {
             runtimeMetrics.registerEventQueueSizeGauge(() -> 0);
diff --git a/core/src/test/java/kafka/security/minikdc/MiniKdc.java b/core/src/test/java/kafka/security/minikdc/MiniKdc.java
index 99740a133d0..b612543771d 100644
--- a/core/src/test/java/kafka/security/minikdc/MiniKdc.java
+++ b/core/src/test/java/kafka/security/minikdc/MiniKdc.java
@@ -136,8 +136,8 @@ public class MiniKdc {
      * MiniKdc.
      */
     public MiniKdc(Properties config, File workDir) {
-        Set<String> requiredProperties = new HashSet<>(List.of(ORG_NAME, ORG_DOMAIN, KDC_BIND_ADDRESS, KDC_PORT,
-                INSTANCE, TRANSPORT, MAX_TICKET_LIFETIME, MAX_RENEWABLE_LIFETIME));
+        Set<String> requiredProperties = Set.of(ORG_NAME, ORG_DOMAIN, KDC_BIND_ADDRESS, KDC_PORT,
+                INSTANCE, TRANSPORT, MAX_TICKET_LIFETIME, MAX_RENEWABLE_LIFETIME);
         if (!config.keySet().containsAll(requiredProperties)) {
             throw new IllegalArgumentException("Missing required properties: " + requiredProperties);
         }
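Note on MiniKdc above: requiredProperties is only ever read (containsAll), so an unmodifiable Set.of is safe there; any call site that still mutates its set must keep HashSet, because the immutable factories throw UnsupportedOperationException on mutation, which is one of the exclusion criteria this commit applies. A minimal sketch (class name and property keys are hypothetical):

    import java.util.Set;

    public class ImmutabilityDemo {
        public static void main(String[] args) {
            Set<String> required = Set.of("org.name", "org.domain"); // hypothetical keys
            try {
                required.add("kdc.port"); // any mutation of a Set.of fails
            } catch (UnsupportedOperationException e) {
                System.out.println("Set.of is unmodifiable");
            }
        }
    }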
diff --git a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java
index fa906a8ffb4..5e21c6099e7 100644
--- a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java
+++ b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java
@@ -29,8 +29,6 @@ import org.apache.kafka.test.TestUtils;

 import org.junit.jupiter.api.Test;

-import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
@@ -119,7 +117,7 @@ public class ReconfigurableQuorumIntegrationTest {
         try (Admin admin = Admin.create(cluster.clientProperties())) {
             TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> {
                 Map<Integer, Uuid> voters = findVoterDirs(admin);
-                assertEquals(new HashSet<>(List.of(3000, 3001, 3002)), voters.keySet());
+                assertEquals(Set.of(3000, 3001, 3002), voters.keySet());
                 for (int replicaId : new int[] {3000, 3001, 3002}) {
                     assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId));
                 }
@@ -144,7 +142,7 @@ public class ReconfigurableQuorumIntegrationTest {
         try (Admin admin = Admin.create(cluster.clientProperties())) {
             TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> {
                 Map<Integer, Uuid> voters = findVoterDirs(admin);
-                assertEquals(new HashSet<>(List.of(3000, 3001, 3002, 3003)), voters.keySet());
+                assertEquals(Set.of(3000, 3001, 3002, 3003), voters.keySet());
                 for (int replicaId : new int[] {3000, 3001, 3002, 3003}) {
                     assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId));
                 }
@@ -153,7 +151,7 @@ public class ReconfigurableQuorumIntegrationTest {
             admin.removeRaftVoter(3000, dirId).all().get();
             TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> {
                 Map<Integer, Uuid> voters = findVoterDirs(admin);
-                assertEquals(new HashSet<>(List.of(3001, 3002, 3003)), voters.keySet());
+                assertEquals(Set.of(3001, 3002, 3003), voters.keySet());
                 for (int replicaId : new int[] {3001, 3002, 3003}) {
                     assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId));
                 }
diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
index 314be4393f8..5d2d2e9a377 100644
--- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
+++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
@@ -1013,7 +1013,7 @@ public class SharePartitionManagerTest {
         ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData1);
         assertEquals(Errors.NONE, resp1.error());

-        assertEquals(new HashSet<>(List.of(tp0, tp1)),
+        assertEquals(Set.of(tp0, tp1),
             new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1)));

         // Create a new share session with an initial share fetch request.
@@ -1047,7 +1047,7 @@ public class SharePartitionManagerTest {
         ShareFetchResponse resp3 = context3.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData3);
         assertEquals(Errors.NONE, resp3.error());

-        assertEquals(new HashSet<>(List.of(tp0, tp1, tp2)),
+        assertEquals(Set.of(tp0, tp1, tp2),
             new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1)));

         // Continue the second session we created.
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroupState.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroupState.java
index 5cca1c7e055..c2ddfcfd688 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroupState.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/classic/ClassicGroupState.java
@@ -17,8 +17,6 @@
 package org.apache.kafka.coordinator.group.classic;

-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Locale;
 import java.util.Set;

@@ -131,7 +129,7 @@ public enum ClassicGroupState {
     }

     private void addValidPreviousStates(ClassicGroupState... validPreviousStates) {
-        this.validPreviousStates = new HashSet<>(Arrays.asList(validPreviousStates));
+        this.validPreviousStates = Set.of(validPreviousStates);
     }

     /**
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/assignor/RangeSetTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/assignor/RangeSetTest.java
index cc7ad991dec..2f9f0f171a7 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/assignor/RangeSetTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/assignor/RangeSetTest.java
@@ -18,7 +18,6 @@ package org.apache.kafka.coordinator.group.assignor;

 import org.junit.jupiter.api.Test;

-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 import java.util.Set;
@@ -124,7 +123,7 @@ public class RangeSetTest {
         RangeSet rangeSet2 = new RangeSet(5, 10);
         RangeSet rangeSet3 = new RangeSet(6, 10);
         Set<Integer> set = Set.of(5, 6, 7, 8, 9);
-        HashSet<Integer> hashSet = new HashSet<>(Set.of(6, 7, 8, 9));
+        Set<Integer> hashSet = Set.of(6, 7, 8, 9);

         assertEquals(rangeSet1, rangeSet2);
         assertNotEquals(rangeSet1, rangeSet3);
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java
index dfcb415fd3e..1ba83e10cd6 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/classic/ClassicGroupTest.java
@@ -420,7 +420,7 @@ public class ClassicGroupTest {

         group.initNextGeneration();

-        Set<String> expectedTopics = new HashSet<>(Set.of("foo"));
+        Set<String> expectedTopics = Set.of("foo");
         assertEquals(expectedTopics, group.subscribedTopics().get());

         group.transitionTo(PREPARING_REBALANCE);
@@ -1275,14 +1275,14 @@ public class ClassicGroupTest {

         group.transitionTo(COMPLETING_REBALANCE);

-        assertTrue(group.isInStates(new HashSet<>(List.of("completingrebalance")), 0));
+        assertTrue(group.isInStates(Set.of("completingrebalance"), 0));

         group.transitionTo(STABLE);
         assertTrue(group.isInStates(Set.of("stable"), 0));
         assertFalse(group.isInStates(Set.of("empty"), 0));

         group.transitionTo(DEAD);
-        assertTrue(group.isInStates(new HashSet<>(List.of("dead", " ")), 0));
+        assertTrue(group.isInStates(Set.of("dead", " "), 0));
     }

     @Test
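Note on the group-state conversions above: Set.of also rejects null elements with NullPointerException, unlike a HashSet copy, which is another reason this commit only converts call sites whose elements are known to be non-null (the ClassicGroupTest sets hold only literal strings). A minimal sketch of the difference (class name is illustrative):

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class NullElementDemo {
        public static void main(String[] args) {
            // The legacy copy tolerates a null element...
            Set<String> withNull = new HashSet<>(Collections.singletonList(null));
            System.out.println(withNull.contains(null)); // true

            // ...while Set.of fails fast on null.
            try {
                Set.of((String) null);
            } catch (NullPointerException e) {
                System.out.println("Set.of rejects null elements");
            }
        }
    }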
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java
index 3aa0a861725..fa285b5bbf6 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/metrics/GroupCoordinatorMetricsTest.java
@@ -36,8 +36,6 @@ import com.yammer.metrics.core.MetricsRegistry;

 import org.junit.jupiter.api.Test;

-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.IntStream;
@@ -63,7 +61,7 @@ public class GroupCoordinatorMetricsTest {
         MetricsRegistry registry = new MetricsRegistry();
         Metrics metrics = new Metrics();

-        HashSet<MetricName> expectedMetrics = new HashSet<>(Arrays.asList(
+        Set<MetricName> expectedMetrics = Set.of(
             metrics.metricName("offset-commit-rate", GroupCoordinatorMetrics.METRICS_GROUP),
             metrics.metricName("offset-commit-count", GroupCoordinatorMetrics.METRICS_GROUP),
             metrics.metricName("offset-expiration-rate", GroupCoordinatorMetrics.METRICS_GROUP),
@@ -159,11 +157,11 @@ public class GroupCoordinatorMetricsTest {
                 "streams-group-count",
                 GroupCoordinatorMetrics.METRICS_GROUP,
                 Map.of("state", StreamsGroupState.NOT_READY.toString()))
-        ));
+        );

         try {
             try (GroupCoordinatorMetrics ignored = new GroupCoordinatorMetrics(registry, metrics)) {
-                HashSet<String> expectedRegistry = new HashSet<>(Arrays.asList(
+                Set<String> expectedRegistry = Set.of(
                     "kafka.coordinator.group:type=GroupMetadataManager,name=NumOffsets",
                     "kafka.coordinator.group:type=GroupMetadataManager,name=NumGroups",
                     "kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsPreparingRebalance",
@@ -171,7 +169,7 @@ public class GroupCoordinatorMetricsTest {
                     "kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsStable",
                     "kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsDead",
                     "kafka.coordinator.group:type=GroupMetadataManager,name=NumGroupsEmpty"
-                ));
+                );

                 assertMetricsForTypeEqual(registry, "kafka.coordinator.group", expectedRegistry);
                 expectedMetrics.forEach(metricName -> assertTrue(metrics.metrics().containsKey(metricName), metricName + " is missing"));
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java
index f6afa3ee08a..7850fde746d 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/consumer/ConsumerGroupTest.java
@@ -708,7 +708,7 @@ public class ConsumerGroupTest {
         // Initial assignment for member1
         Assignment initialAssignment = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(0))
+            Set.of(0)
         ));
         consumerGroup.updateTargetAssignment(memberId1, initialAssignment);

@@ -723,7 +723,7 @@ public class ConsumerGroupTest {
         // New assignment for member1
         Assignment newAssignment = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(1))
+            Set.of(1)
         ));
         consumerGroup.updateTargetAssignment(memberId1, newAssignment);

@@ -738,7 +738,7 @@ public class ConsumerGroupTest {
         // New assignment for member2 to add partition 1
         Assignment newAssignment2 = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(1))
+            Set.of(1)
         ));
         consumerGroup.updateTargetAssignment(memberId2, newAssignment2);

@@ -753,7 +753,7 @@ public class ConsumerGroupTest {
         // New assignment for member1 to revoke partition 1 and assign partition 0
         Assignment newAssignment1 = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(0))
+            Set.of(0)
         ));
         consumerGroup.updateTargetAssignment(memberId1, newAssignment1);

@@ -1119,8 +1119,8 @@ public class ConsumerGroupTest {
         assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range"));
         assertEquals(1, consumerGroup.classicMembersSupportedProtocols().get("roundrobin"));
-        assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, new HashSet<>(Arrays.asList("range", "sticky"))));
-        assertFalse(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, new HashSet<>(Arrays.asList("sticky", "roundrobin"))));
+        assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, Set.of("range", "sticky")));
+        assertFalse(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, Set.of("sticky", "roundrobin")));

         member2 = new ConsumerGroupMember.Builder(member2)
             .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
@@ -1144,7 +1144,7 @@ public class ConsumerGroupTest {
         assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("range"));
         assertEquals(2, consumerGroup.classicMembersSupportedProtocols().get("roundrobin"));
-        assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, new HashSet<>(Arrays.asList("sticky", "roundrobin"))));
+        assertTrue(consumerGroup.supportsClassicProtocols(ConsumerProtocol.PROTOCOL_TYPE, Set.of("sticky", "roundrobin")));
     }

     @Test
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupTest.java
index 095f6418bae..5c4788b9971 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/share/ShareGroupTest.java
@@ -36,7 +36,6 @@ import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;

 import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -220,7 +219,7 @@ public class ShareGroupTest {
         // Initial assignment for member1
         Assignment initialAssignment = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(0))
+            Set.of(0)
         ));
         shareGroup.updateTargetAssignment(memberId1, initialAssignment);

@@ -235,7 +234,7 @@ public class ShareGroupTest {
         // New assignment for member1
         Assignment newAssignment = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(1))
+            Set.of(1)
         ));
         shareGroup.updateTargetAssignment(memberId1, newAssignment);

@@ -250,7 +249,7 @@ public class ShareGroupTest {
         // New assignment for member2 to add partition 1
         Assignment newAssignment2 = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(1))
+            Set.of(1)
         ));
         shareGroup.updateTargetAssignment(memberId2, newAssignment2);

@@ -265,7 +264,7 @@ public class ShareGroupTest {
         // New assignment for member1 to revoke partition 1 and assign partition 0
         Assignment newAssignment1 = new Assignment(Map.of(
             topicId,
-            new HashSet<>(List.of(0))
+            Set.of(0)
         ));
         shareGroup.updateTargetAssignment(memberId1, newAssignment1);
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java
index 8584716065f..13a441cdc42 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/assignor/MockAssignorTest.java
@@ -57,7 +57,7 @@ public class MockAssignorTest {
         final AssignmentMemberSpec memberSpec1 = new AssignmentMemberSpec(
             Optional.empty(),
             Optional.empty(),
-            Map.of("test-subtopology", new HashSet<>(List.of(0))),
+            Map.of("test-subtopology", Set.of(0)),
             Map.of(),
             Map.of(),
             "test-process",
@@ -69,7 +69,7 @@ public class MockAssignorTest {
         final AssignmentMemberSpec memberSpec2 = new AssignmentMemberSpec(
             Optional.empty(),
             Optional.empty(),
-            Map.of("test-subtopology", new HashSet<>(List.of(0))),
+            Map.of("test-subtopology", Set.of(0)),
             Map.of(),
             Map.of(),
             "test-process",
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java
index 11280b9426b..b217cce0d8c 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/streams/topics/ConfiguredSubtopologyTest.java
@@ -20,7 +20,6 @@ import org.apache.kafka.common.message.StreamsGroupDescribeResponseData;

 import org.junit.jupiter.api.Test;

-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -100,8 +99,8 @@ public class ConfiguredSubtopologyTest {
     @Test
     public void testAsStreamsGroupDescribeSubtopology() {
         String subtopologyId = "subtopology1";
-        Set<String> sourceTopics = new HashSet<>(Set.of("sourceTopic1", "sourceTopic2"));
-        Set<String> repartitionSinkTopics = new HashSet<>(Set.of("repartitionSinkTopic1", "repartitionSinkTopic2"));
+        Set<String> sourceTopics = Set.of("sourceTopic1", "sourceTopic2");
+        Set<String> repartitionSinkTopics = Set.of("repartitionSinkTopic1", "repartitionSinkTopic2");
         ConfiguredInternalTopic internalTopicMock = mock(ConfiguredInternalTopic.class);
         StreamsGroupDescribeResponseData.TopicInfo topicInfo = new StreamsGroupDescribeResponseData.TopicInfo();
         when(internalTopicMock.asStreamsGroupDescribeTopicInfo()).thenReturn(topicInfo);
diff --git a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java
index e68310afc8a..15abdba14b2 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/AclControlManagerTest.java
@@ -324,9 +324,9 @@ public class AclControlManagerTest {
             assertEquals(Optional.empty(), result.exception());
             deleted.add(result.aclBinding());
         }
-        assertEquals(new HashSet<>(List.of(
+        assertEquals(Set.of(
             TEST_ACLS.get(0).toBinding(),
-            TEST_ACLS.get(2).toBinding())), deleted);
+            TEST_ACLS.get(2).toBinding()), deleted);
         assertEquals(InvalidRequestException.class,
             deleteResult.response().get(1).exception().get().getClass());
         RecordTestUtils.replayAll(manager, deleteResult.records());
diff --git a/metadata/src/test/java/org/apache/kafka/controller/BrokerToElrsTest.java b/metadata/src/test/java/org/apache/kafka/controller/BrokerToElrsTest.java
index e3a96a2491c..187b34446fa 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/BrokerToElrsTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/BrokerToElrsTest.java
@@ -25,7 +25,6 @@ import org.apache.kafka.timeline.SnapshotRegistry;
 import org.junit.jupiter.api.Test;

 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;

 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -37,7 +36,7 @@ public class BrokerToElrsTest {
     };

     private static Set<TopicIdPartition> toSet(TopicIdPartition... partitions) {
-        return new HashSet<>(List.of(partitions));
+        return Set.of(partitions);
     }

     private static Set<TopicIdPartition> toSet(BrokersToIsrs.PartitionsOnReplicaIterator iterator) {
diff --git a/metadata/src/test/java/org/apache/kafka/controller/BrokersToIsrsTest.java b/metadata/src/test/java/org/apache/kafka/controller/BrokersToIsrsTest.java
index fc3b8d2899e..886a20050ae 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/BrokersToIsrsTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/BrokersToIsrsTest.java
@@ -27,7 +27,6 @@ import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;

 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;

 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -41,7 +40,7 @@ public class BrokersToIsrsTest {
     };

     private static Set<TopicIdPartition> toSet(TopicIdPartition... partitions) {
-        return new HashSet<>(List.of(partitions));
+        return Set.of(partitions);
     }

     private static Set<TopicIdPartition> toSet(PartitionsOnReplicaIterator iterator) {
diff --git a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java
index eac143209dd..5ff9cff626d 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/FeatureControlManagerTest.java
@@ -41,11 +41,11 @@ import java.util.AbstractMap.SimpleImmutableEntry;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;

 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -257,9 +257,8 @@ public class FeatureControlManagerTest {
         assertEquals(MetadataVersion.MINIMUM_VERSION, manager.metadataVersionOrThrow());
         assertEquals(Optional.of((short) 1), manager.finalizedFeatures(Long.MAX_VALUE).get(TestFeatureVersion.FEATURE_NAME));
         assertEquals(Optional.of((short) 2), manager.finalizedFeatures(Long.MAX_VALUE).get(TransactionVersion.FEATURE_NAME));
-        assertEquals(new HashSet<>(List.of(
-            MetadataVersion.FEATURE_NAME, TestFeatureVersion.FEATURE_NAME, TransactionVersion.FEATURE_NAME)),
-                manager.finalizedFeatures(Long.MAX_VALUE).featureNames());
+        assertEquals(Set.of(MetadataVersion.FEATURE_NAME, TestFeatureVersion.FEATURE_NAME, TransactionVersion.FEATURE_NAME),
+            manager.finalizedFeatures(Long.MAX_VALUE).featureNames());
     }

     private FeatureControlManager createTestManager() {
diff --git a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java
index 4a2e8232425..41ef67ab885 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/QuorumControllerTest.java
@@ -660,7 +660,7 @@ public class QuorumControllerTest {
             ).iterator()));
         CreateTopicsResponseData createTopicsResponseData = active.createTopics(
             ANONYMOUS_CONTEXT, createTopicsRequestData,
-            new HashSet<>(List.of("foo", "bar"))).get();
+            Set.of("foo", "bar")).get();
         assertEquals(Errors.NONE, Errors.forCode(createTopicsResponseData.topics().find("foo").errorCode()));
         assertEquals(Errors.NONE, Errors.forCode(createTopicsResponseData.topics().find("bar").errorCode()));
         Uuid topicIdFoo = createTopicsResponseData.topics().find("foo").topicId();
diff --git a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java
index 1b5bf4fb29d..3a33111a318 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java
@@ -1566,7 +1566,7 @@ public class ReplicationControlManagerTest {
         ctx.unfenceBrokers(0, 1, 3);
         ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.CREATE_TOPICS);
         ControllerResult<CreateTopicsResponseData> createTopicResult = replicationControl.
-            createTopics(requestContext, request, new HashSet<>(List.of("foo", "bar", "quux", "foo2")));
+            createTopics(requestContext, request, Set.of("foo", "bar", "quux", "foo2"));
         ctx.replay(createTopicResult.records());
         List<CreatePartitionsTopic> topics = new ArrayList<>();
         topics.add(new CreatePartitionsTopic().
@@ -1690,7 +1690,7 @@ public class ReplicationControlManagerTest {

         ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.CREATE_TOPICS);
         ControllerResult<CreateTopicsResponseData> createTopicResult = replicationControl.
-            createTopics(requestContext, request, new HashSet<>(List.of("foo")));
+            createTopics(requestContext, request, Set.of("foo"));
         ctx.replay(createTopicResult.records());

         ctx.registerBrokers(0, 1);
@@ -2961,12 +2961,12 @@ public class ReplicationControlManagerTest {
         KRaftClusterDescriber describer = replication.clusterDescriber;
         HashSet<UsableBroker> brokers = new HashSet<>();
         describer.usableBrokers().forEachRemaining(broker -> brokers.add(broker));
-        assertEquals(new HashSet<>(List.of(
+        assertEquals(Set.of(
             new UsableBroker(0, Optional.empty(), true),
             new UsableBroker(1, Optional.empty(), true),
             new UsableBroker(2, Optional.empty(), false),
             new UsableBroker(3, Optional.empty(), false),
-            new UsableBroker(4, Optional.empty(), false))), brokers);
+            new UsableBroker(4, Optional.empty(), false)), brokers);
         assertEquals(DirectoryId.MIGRATING, describer.defaultDir(1));
         assertEquals(Uuid.fromString("ozwqsVMFSNiYQUPSJA3j0w"), describer.defaultDir(2));
         assertEquals(DirectoryId.UNASSIGNED, describer.defaultDir(3));
diff --git a/metadata/src/test/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsTest.java b/metadata/src/test/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsTest.java
index f47a601a61a..b06c13ec749 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/metrics/ControllerMetadataMetricsTest.java
@@ -26,8 +26,6 @@ import com.yammer.metrics.core.MetricsRegistry;

 import org.junit.jupiter.api.Test;

-import java.util.HashSet;
-import java.util.List;
 import java.util.Optional;
 import java.util.Set;
 import java.util.function.BiConsumer;
@@ -53,7 +51,7 @@ public class ControllerMetadataMetricsTest {
                 brokerRegistration(false, false)
             );
             ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "kafka.controller:",
-                new HashSet<>(List.of(
+                Set.of(
                     "kafka.controller:type=KafkaController,name=ActiveBrokerCount",
                     "kafka.controller:type=KafkaController,name=FencedBrokerCount",
                     "kafka.controller:type=KafkaController,name=ControlledShutdownBrokerCount",
@@ -67,7 +65,7 @@ public class ControllerMetadataMetricsTest {
                     "kafka.controller:type=KafkaController,name=IgnoredStaticVoters",
                    "kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec",
                     "kafka.controller:type=ControllerStats,name=ElectionFromEligibleLeaderReplicasPerSec"
-                )));
+                ));
         }
         ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "KafkaController", Set.of());
); ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "kafka.controller:", - new HashSet<>(List.of( + Set.of( "kafka.controller:type=KafkaController,name=ActiveBrokerCount", "kafka.controller:type=KafkaController,name=FencedBrokerCount", "kafka.controller:type=KafkaController,name=ControlledShutdownBrokerCount", @@ -67,7 +65,7 @@ public class ControllerMetadataMetricsTest { "kafka.controller:type=KafkaController,name=IgnoredStaticVoters", "kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec", "kafka.controller:type=ControllerStats,name=ElectionFromEligibleLeaderReplicasPerSec" - ))); + )); } ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "KafkaController", Set.of()); diff --git a/metadata/src/test/java/org/apache/kafka/controller/metrics/QuorumControllerMetricsTest.java b/metadata/src/test/java/org/apache/kafka/controller/metrics/QuorumControllerMetricsTest.java index 4698aa86bee..cb4eb63bab6 100644 --- a/metadata/src/test/java/org/apache/kafka/controller/metrics/QuorumControllerMetricsTest.java +++ b/metadata/src/test/java/org/apache/kafka/controller/metrics/QuorumControllerMetricsTest.java @@ -26,8 +26,6 @@ import com.yammer.metrics.core.MetricsRegistry; import org.junit.jupiter.api.Test; -import java.util.HashSet; -import java.util.List; import java.util.Optional; import java.util.Set; @@ -44,7 +42,7 @@ public class QuorumControllerMetricsTest { time, 9000)) { metrics.addTimeSinceLastHeartbeatMetric(1); - HashSet<String> expected = new HashSet<>(List.of( + Set<String> expected = Set.of( "kafka.controller:type=ControllerEventManager,name=EventQueueProcessingTimeMs", "kafka.controller:type=ControllerEventManager,name=EventQueueTimeMs", "kafka.controller:type=KafkaController,name=ActiveControllerCount", @@ -57,7 +55,7 @@ public class QuorumControllerMetricsTest { "kafka.controller:type=KafkaController,name=NewActiveControllersCount", "kafka.controller:type=KafkaController,name=TimedOutBrokerHeartbeatCount", "kafka.controller:type=KafkaController,name=TimeSinceLastHeartbeatReceivedMs,broker=1" - )); + ); ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "kafka.controller", expected); } ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "kafka.controller", diff --git a/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java b/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java index 1fb44875c45..44e934bffd0 100644 --- a/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/TopicsImageTest.java @@ -250,11 +250,11 @@ public class TopicsImageTest { LocalReplicaChanges changes = delta.localChanges(localId); assertEquals( - new HashSet<>(List.of(new TopicPartition("baz", 0))), + Set.of(new TopicPartition("baz", 0)), changes.electedLeaders().keySet() ); assertEquals( - new HashSet<>(List.of(new TopicPartition("baz", 0))), + Set.of(new TopicPartition("baz", 0)), changes.leaders().keySet() ); assertEquals( @@ -306,7 +306,7 @@ public class TopicsImageTest { RecordTestUtils.replayAll(delta, topicRecords); LocalReplicaChanges changes = delta.localChanges(localId); - assertEquals(new HashSet<>(List.of(new TopicPartition("zoo", 0))), changes.deletes()); + assertEquals(Set.of(new TopicPartition("zoo", 0)), changes.deletes()); assertEquals(Map.of(), changes.electedLeaders()); assertEquals(Map.of(), changes.leaders()); assertEquals(Map.of(), changes.followers()); @@ -348,7 +348,7 @@ public class TopicsImageTest { assertEquals(Set.of(), changes.deletes());
assertEquals(Map.of(), changes.electedLeaders()); assertEquals( - new HashSet<>(List.of(new TopicPartition("zoo", 0))), + Set.of(new TopicPartition("zoo", 0)), changes.leaders().keySet() ); assertEquals(Map.of(), changes.followers()); @@ -567,19 +567,19 @@ public class TopicsImageTest { LocalReplicaChanges changes = delta.localChanges(localId); assertEquals( - new HashSet<>(List.of(new TopicPartition("zoo", 2), new TopicPartition("zoo", 3))), + Set.of(new TopicPartition("zoo", 2), new TopicPartition("zoo", 3)), changes.deletes() ); assertEquals( - new HashSet<>(List.of(new TopicPartition("zoo", 0), new TopicPartition("zoo", 4))), + Set.of(new TopicPartition("zoo", 0), new TopicPartition("zoo", 4)), changes.electedLeaders().keySet() ); assertEquals( - new HashSet<>(List.of(new TopicPartition("zoo", 0), new TopicPartition("zoo", 4))), + Set.of(new TopicPartition("zoo", 0), new TopicPartition("zoo", 4)), changes.leaders().keySet() ); assertEquals( - new HashSet<>(List.of(new TopicPartition("zoo", 1), new TopicPartition("zoo", 5))), + Set.of(new TopicPartition("zoo", 1), new TopicPartition("zoo", 5)), changes.followers().keySet() ); @@ -647,9 +647,9 @@ public class TopicsImageTest { assertNull(map.get("baz")); HashSet<Uuid> uuids = new HashSet<>(); map.values().iterator().forEachRemaining(uuids::add); - HashSet<Uuid> expectedUuids = new HashSet<>(List.of( + Set<Uuid> expectedUuids = Set.of( Uuid.fromString("ThIaNwRnSM2Nt9Mx1v0RvA"), - Uuid.fromString("f62ptyETTjet8SL5ZeREiw"))); + Uuid.fromString("f62ptyETTjet8SL5ZeREiw")); assertEquals(expectedUuids, uuids); assertThrows(UnsupportedOperationException.class, () -> map.remove("foo")); assertThrows(UnsupportedOperationException.class, () -> map.put("bar", FOO_UUID)); @@ -666,7 +666,7 @@ public class TopicsImageTest { assertNull(map.get(BAZ_UUID)); HashSet<String> names = new HashSet<>(); map.values().iterator().forEachRemaining(names::add); - HashSet<String> expectedNames = new HashSet<>(List.of("foo", "bar")); + Set<String> expectedNames = Set.of("foo", "bar"); assertEquals(expectedNames, names); assertThrows(UnsupportedOperationException.class, () -> map.remove(FOO_UUID)); assertThrows(UnsupportedOperationException.class, () -> map.put(FOO_UUID, "bar")); diff --git a/metadata/src/test/java/org/apache/kafka/image/loader/metrics/MetadataLoaderMetricsTest.java b/metadata/src/test/java/org/apache/kafka/image/loader/metrics/MetadataLoaderMetricsTest.java index 02ee90044aa..e200b6ce551 100644 --- a/metadata/src/test/java/org/apache/kafka/image/loader/metrics/MetadataLoaderMetricsTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/loader/metrics/MetadataLoaderMetricsTest.java @@ -26,8 +26,6 @@ import com.yammer.metrics.core.MetricsRegistry; import org.junit.jupiter.api.Test; -import java.util.HashSet; -import java.util.List; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -70,11 +68,11 @@ public class MetadataLoaderMetricsTest { try { try (FakeMetadataLoaderMetrics fakeMetrics = new FakeMetadataLoaderMetrics(registry)) { ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "kafka.server", - new HashSet<>(List.of( + Set.of( "kafka.server:type=MetadataLoader,name=CurrentControllerId", "kafka.server:type=MetadataLoader,name=CurrentMetadataVersion", "kafka.server:type=MetadataLoader,name=HandleLoadSnapshotCount" - ))); + )); } ControllerMetricsTestUtils.assertMetricsForTypeEqual(registry, "kafka.server", Set.of()); diff --git
a/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java index b97643d4a3b..2f9ef2a121d 100644 --- a/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/ControllerRegistrationsPublisherTest.java @@ -32,10 +32,9 @@ import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.OptionalInt; +import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -91,7 +90,7 @@ public class ControllerRegistrationsPublisherTest { build()); } System.out.println("TEST_IMAGE.cluster = " + TEST_IMAGE.cluster()); - assertEquals(new HashSet<>(List.of(0, 1, 2)), publisher.controllers().keySet()); + assertEquals(Set.of(0, 1, 2), publisher.controllers().keySet()); assertTrue(publisher.controllers().get(0).zkMigrationReady()); assertFalse(publisher.controllers().get(1).zkMigrationReady()); assertFalse(publisher.controllers().get(2).zkMigrationReady()); diff --git a/metadata/src/test/java/org/apache/kafka/image/publisher/metrics/SnapshotEmitterMetricsTest.java b/metadata/src/test/java/org/apache/kafka/image/publisher/metrics/SnapshotEmitterMetricsTest.java index a9ed94a8f66..9af3f11d559 100644 --- a/metadata/src/test/java/org/apache/kafka/image/publisher/metrics/SnapshotEmitterMetricsTest.java +++ b/metadata/src/test/java/org/apache/kafka/image/publisher/metrics/SnapshotEmitterMetricsTest.java @@ -29,8 +29,6 @@ import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.HashSet; -import java.util.List; import java.util.Optional; import java.util.Set; @@ -78,10 +76,10 @@ public class SnapshotEmitterMetricsTest { public void testMetricNames() { try (SnapshotEmitterMetricsTestContext ctx = new SnapshotEmitterMetricsTestContext()) { ControllerMetricsTestUtils.assertMetricsForTypeEqual(ctx.registry, "kafka.server:", - new HashSet<>(List.of( + Set.of( "kafka.server:type=SnapshotEmitter,name=LatestSnapshotGeneratedBytes", "kafka.server:type=SnapshotEmitter,name=LatestSnapshotGeneratedAgeMs" - ))); + )); ctx.metrics.close(); ControllerMetricsTestUtils.assertMetricsForTypeEqual(ctx.registry, "KafkaController", Set.of()); diff --git a/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java b/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java index 53b26680cb1..f6e755e0a0b 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/ReplicasTest.java @@ -20,7 +20,6 @@ package org.apache.kafka.metadata; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -109,9 +108,9 @@ public class ReplicasTest { @Test public void testToSet() { assertEquals(Set.of(), Replicas.toSet(new int[] {})); - assertEquals(new HashSet<>(List.of(3, 1, 5)), + assertEquals(Set.of(3, 1, 5), Replicas.toSet(new int[] {1, 3, 5})); - assertEquals(new HashSet<>(List.of(1, 2, 10)), + assertEquals(Set.of(1, 2, 10), Replicas.toSet(new int[] {1, 1, 2, 10, 10})); } diff --git 
a/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java b/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java index 51675528e50..13daec70fa6 100644 --- a/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java +++ b/metadata/src/test/java/org/apache/kafka/metadata/properties/MetaPropertiesEnsembleTest.java @@ -28,7 +28,6 @@ import java.nio.file.Files; import java.util.AbstractMap.SimpleImmutableEntry; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -53,8 +52,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public final class MetaPropertiesEnsembleTest { private static final MetaPropertiesEnsemble FOO = new MetaPropertiesEnsemble( - new HashSet<>(List.of("/tmp/empty1", "/tmp/empty2")), - new HashSet<>(List.of("/tmp/error3")), + Set.of("/tmp/empty1", "/tmp/empty2"), + Set.of("/tmp/error3"), Stream.of( new SimpleImmutableEntry<>("/tmp/dir4", new MetaProperties.Builder(). @@ -93,34 +92,34 @@ public final class MetaPropertiesEnsembleTest { @Test public void testEmptyLogDirsForFoo() { - assertEquals(new HashSet<>(List.of("/tmp/empty1", "/tmp/empty2")), + assertEquals(Set.of("/tmp/empty1", "/tmp/empty2"), FOO.emptyLogDirs()); } @Test public void testEmptyLogDirsForEmpty() { - assertEquals(new HashSet<>(), EMPTY.emptyLogDirs()); + assertEquals(Set.of(), EMPTY.emptyLogDirs()); } @Test public void testErrorLogDirsForFoo() { - assertEquals(new HashSet<>(List.of("/tmp/error3")), FOO.errorLogDirs()); + assertEquals(Set.of("/tmp/error3"), FOO.errorLogDirs()); } @Test public void testErrorLogDirsForEmpty() { - assertEquals(new HashSet<>(), EMPTY.errorLogDirs()); + assertEquals(Set.of(), EMPTY.errorLogDirs()); } @Test public void testLogDirPropsForFoo() { - assertEquals(new HashSet<>(List.of("/tmp/dir4", "/tmp/dir5")), + assertEquals(Set.of("/tmp/dir4", "/tmp/dir5"), FOO.logDirProps().keySet()); } @Test public void testLogDirPropsForEmpty() { - assertEquals(new HashSet<>(), + assertEquals(Set.of(), EMPTY.logDirProps().keySet()); } diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java index b000fbbcd59..0c797d3ada3 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientReconfigTest.java @@ -56,13 +56,13 @@ import org.mockito.Mockito; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; import java.util.OptionalLong; +import java.util.Set; import java.util.concurrent.ThreadLocalRandom; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -1127,7 +1127,7 @@ public class KafkaRaftClientReconfigTest { context.pollUntilRequest(); context.collectEndQuorumRequests( epoch, - new HashSet<>(List.of(follower1.id(), follower2.id())), + Set.of(follower1.id(), follower2.id()), Optional.empty() ); diff --git a/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java b/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java index 985c9b4929a..307b6aa59a1 100644 --- a/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java +++ 
b/raft/src/test/java/org/apache/kafka/raft/VoterSetTest.java @@ -29,8 +29,6 @@ import org.junit.jupiter.params.provider.ValueSource; import java.net.InetSocketAddress; import java.util.HashMap; -import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -85,7 +83,7 @@ public final class VoterSetTest { @Test void testVoterIds() { VoterSet voterSet = VoterSet.fromMap(voterMap(IntStream.of(1, 2, 3), true)); - assertEquals(new HashSet<>(List.of(1, 2, 3)), voterSet.voterIds()); + assertEquals(Set.of(1, 2, 3), voterSet.voterIds()); } @Test diff --git a/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java b/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java index 8ec3711ff8c..34eec1bdc6c 100644 --- a/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java +++ b/server-common/src/test/java/org/apache/kafka/server/network/EndpointReadyFuturesTest.java @@ -24,10 +24,10 @@ import org.apache.kafka.common.security.auth.SecurityProtocol; import org.junit.jupiter.api.Test; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; @@ -98,7 +98,7 @@ public final class EndpointReadyFuturesTest { public void testImmediateCompletion() { EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder(). build(Optional.empty(), INFO); - assertEquals(new HashSet<>(List.of(EXTERNAL, INTERNAL)), + assertEquals(Set.of(EXTERNAL, INTERNAL), readyFutures.futures().keySet()); assertComplete(readyFutures, EXTERNAL, INTERNAL); } @@ -109,7 +109,7 @@ public final class EndpointReadyFuturesTest { EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder(). addReadinessFuture("foo", foo). build(Optional.empty(), INFO); - assertEquals(new HashSet<>(List.of(EXTERNAL, INTERNAL)), + assertEquals(Set.of(EXTERNAL, INTERNAL), readyFutures.futures().keySet()); assertIncomplete(readyFutures, EXTERNAL, INTERNAL); foo.complete(null); @@ -124,7 +124,7 @@ public final class EndpointReadyFuturesTest { addReadinessFuture("foo", foo). addReadinessFuture("bar", bar). build(Optional.empty(), INFO); - assertEquals(new HashSet<>(List.of(EXTERNAL, INTERNAL)), + assertEquals(Set.of(EXTERNAL, INTERNAL), readyFutures.futures().keySet()); assertIncomplete(readyFutures, EXTERNAL, INTERNAL); foo.complete(null); @@ -141,7 +141,7 @@ public final class EndpointReadyFuturesTest { EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder(). addReadinessFutures("baz", bazFutures). build(Optional.empty(), INFO); - assertEquals(new HashSet<>(List.of(EXTERNAL, INTERNAL)), + assertEquals(Set.of(EXTERNAL, INTERNAL), readyFutures.futures().keySet()); assertIncomplete(readyFutures, EXTERNAL, INTERNAL); bazFutures.get(EXTERNAL).complete(null); @@ -159,7 +159,7 @@ public final class EndpointReadyFuturesTest { addReadinessFuture("foo", foo). addReadinessFuture("bar", bar). 
build(Optional.empty(), INFO); - assertEquals(new HashSet<>(List.of(EXTERNAL, INTERNAL)), + assertEquals(Set.of(EXTERNAL, INTERNAL), readyFutures.futures().keySet()); assertIncomplete(readyFutures, EXTERNAL, INTERNAL); foo.complete(null); diff --git a/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java b/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java index 59d4864cde9..c5232e4fdd3 100644 --- a/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java +++ b/server/src/main/java/org/apache/kafka/security/authorizer/AclEntry.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.resource.ResourceType; import org.apache.kafka.common.security.auth.KafkaPrincipal; import java.util.Arrays; -import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; @@ -54,17 +53,17 @@ public class AclEntry { public static Set<AclOperation> supportedOperations(ResourceType resourceType) { switch (resourceType) { case TOPIC: - return new HashSet<>(Arrays.asList(READ, WRITE, CREATE, DESCRIBE, DELETE, ALTER, DESCRIBE_CONFIGS, ALTER_CONFIGS)); + return Set.of(READ, WRITE, CREATE, DESCRIBE, DELETE, ALTER, DESCRIBE_CONFIGS, ALTER_CONFIGS); case GROUP: - return new HashSet<>(Arrays.asList(READ, DESCRIBE, DELETE, DESCRIBE_CONFIGS, ALTER_CONFIGS)); + return Set.of(READ, DESCRIBE, DELETE, DESCRIBE_CONFIGS, ALTER_CONFIGS); case CLUSTER: - return new HashSet<>(Arrays.asList(CREATE, CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, ALTER, DESCRIBE)); + return Set.of(CREATE, CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, ALTER, DESCRIBE); case TRANSACTIONAL_ID: - return new HashSet<>(Arrays.asList(DESCRIBE, WRITE, TWO_PHASE_COMMIT)); + return Set.of(DESCRIBE, WRITE, TWO_PHASE_COMMIT); case DELEGATION_TOKEN: return Set.of(DESCRIBE); case USER: - return new HashSet<>(Arrays.asList(CREATE_TOKENS, DESCRIBE_TOKENS)); + return Set.of(CREATE_TOKENS, DESCRIBE_TOKENS); default: throw new IllegalArgumentException("Not a concrete resource type"); } diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java index 542d9cd8d0d..3445e241eeb 100644 --- a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/ShareCoordinatorServiceTest.java @@ -212,7 +212,7 @@ class ShareCoordinatorServiceTest { HashSet<WriteShareGroupStateResponseData.WriteStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<WriteShareGroupStateResponseData.WriteStateResult> expectedResult = new HashSet<>(List.of( + Set<WriteShareGroupStateResponseData.WriteStateResult> expectedResult = Set.of( new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId2) .setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult() @@ -220,15 +220,15 @@ new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId1) .setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partition1))))); + .setPartition(partition1)))); assertEquals(expectedResult, result); verify(time, times(2)).hiResClockMs(); - Set<MetricName> expectedMetrics = new HashSet<>(List.of( + Set<MetricName> expectedMetrics = Set.of( metrics.metricName("write-latency-avg", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-latency-max", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-rate", ShareCoordinatorMetrics.METRICS_GROUP),
metrics.metricName("write-total", ShareCoordinatorMetrics.METRICS_GROUP) - )); + ); expectedMetrics.forEach(metric -> assertTrue(metrics.metrics().containsKey(metric))); } @@ -329,9 +329,9 @@ class ShareCoordinatorServiceTest { HashSet<ReadShareGroupStateResponseData.ReadStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<ReadShareGroupStateResponseData.ReadStateResult> expectedResult = new HashSet<>(List.of( + Set<ReadShareGroupStateResponseData.ReadStateResult> expectedResult = Set.of( topicData1, - topicData2)); + topicData2); assertEquals(expectedResult, result); } @@ -411,9 +411,9 @@ class ShareCoordinatorServiceTest { HashSet<ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult> expectedResult = new HashSet<>(List.of( + Set<ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult> expectedResult = Set.of( topicData1, - topicData2)); + topicData2); assertEquals(expectedResult, result); } @@ -488,7 +488,7 @@ class ShareCoordinatorServiceTest { HashSet<DeleteShareGroupStateResponseData.DeleteStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<DeleteShareGroupStateResponseData.DeleteStateResult> expectedResult = new HashSet<>(List.of( + Set<DeleteShareGroupStateResponseData.DeleteStateResult> expectedResult = Set.of( new DeleteShareGroupStateResponseData.DeleteStateResult() .setTopicId(topicId2) .setPartitions(List.of(new DeleteShareGroupStateResponseData.PartitionResult() @@ -496,7 +496,7 @@ new DeleteShareGroupStateResponseData.DeleteStateResult() .setTopicId(topicId1) .setPartitions(List.of(new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partition1))))); + .setPartition(partition1)))); assertEquals(expectedResult, result); } @@ -573,7 +573,7 @@ class ShareCoordinatorServiceTest { HashSet<InitializeShareGroupStateResponseData.InitializeStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<InitializeShareGroupStateResponseData.InitializeStateResult> expectedResult = new HashSet<>(List.of( + Set<InitializeShareGroupStateResponseData.InitializeStateResult> expectedResult = Set.of( new InitializeShareGroupStateResponseData.InitializeStateResult() .setTopicId(topicId2) .setPartitions(List.of(new InitializeShareGroupStateResponseData.PartitionResult() @@ -582,7 +582,7 @@ .setTopicId(topicId1) .setPartitions(List.of(new InitializeShareGroupStateResponseData.PartitionResult() .setPartition(partition1))) - )); + ); assertEquals(expectedResult, result); } @@ -890,7 +890,7 @@ class ShareCoordinatorServiceTest { HashSet<WriteShareGroupStateResponseData.WriteStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<WriteShareGroupStateResponseData.WriteStateResult> expectedResult = new HashSet<>(List.of( + Set<WriteShareGroupStateResponseData.WriteStateResult> expectedResult = Set.of( new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId2) .setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult() @@ -902,7 +902,7 @@ .setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult() .setPartition(partition1) .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setErrorMessage("Share coordinator is not available."))))); + .setErrorMessage("Share coordinator is not available.")))); assertEquals(expectedResult, result); } @@ -954,7 +954,7 @@ class ShareCoordinatorServiceTest { HashSet<ReadShareGroupStateResponseData.ReadStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<ReadShareGroupStateResponseData.ReadStateResult> expectedResult = new HashSet<>(List.of( + Set<ReadShareGroupStateResponseData.ReadStateResult> expectedResult = Set.of( new ReadShareGroupStateResponseData.ReadStateResult() .setTopicId(topicId2) .setPartitions(List.of(new ReadShareGroupStateResponseData.PartitionResult() @@ -966,7 +966,7 @@ .setPartitions(List.of(new ReadShareGroupStateResponseData.PartitionResult() .setPartition(partition1) .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setErrorMessage("Share coordinator is not available."))))); + .setErrorMessage("Share coordinator is not available."))));
assertEquals(expectedResult, result); } @@ -1018,7 +1018,7 @@ class ShareCoordinatorServiceTest { HashSet<ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult> expectedResult = new HashSet<>(List.of( + Set<ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult> expectedResult = Set.of( new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() .setTopicId(topicId2) .setPartitions(List.of(new ReadShareGroupStateSummaryResponseData.PartitionResult() @@ -1030,7 +1030,7 @@ .setPartitions(List.of(new ReadShareGroupStateSummaryResponseData.PartitionResult() .setPartition(partition1) .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setErrorMessage("Share coordinator is not available."))))); + .setErrorMessage("Share coordinator is not available.")))); assertEquals(expectedResult, result); } @@ -1080,7 +1080,7 @@ class ShareCoordinatorServiceTest { HashSet<DeleteShareGroupStateResponseData.DeleteStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<DeleteShareGroupStateResponseData.DeleteStateResult> expectedResult = new HashSet<>(List.of( + Set<DeleteShareGroupStateResponseData.DeleteStateResult> expectedResult = Set.of( new DeleteShareGroupStateResponseData.DeleteStateResult() .setTopicId(topicId2) .setPartitions(List.of(new DeleteShareGroupStateResponseData.PartitionResult() @@ -1092,7 +1092,7 @@ .setPartitions(List.of(new DeleteShareGroupStateResponseData.PartitionResult() .setPartition(partition1) .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setErrorMessage("Share coordinator is not available."))))); + .setErrorMessage("Share coordinator is not available.")))); assertEquals(expectedResult, result); } @@ -1141,7 +1141,7 @@ class ShareCoordinatorServiceTest { HashSet<InitializeShareGroupStateResponseData.InitializeStateResult> result = new HashSet<>(future.get(5, TimeUnit.SECONDS).results()); - HashSet<InitializeShareGroupStateResponseData.InitializeStateResult> expectedResult = new HashSet<>(List.of( + Set<InitializeShareGroupStateResponseData.InitializeStateResult> expectedResult = Set.of( new InitializeShareGroupStateResponseData.InitializeStateResult() .setTopicId(topicId2) .setPartitions(List.of(new InitializeShareGroupStateResponseData.PartitionResult() @@ -1153,7 +1153,7 @@ .setPartitions(List.of(new InitializeShareGroupStateResponseData.PartitionResult() .setPartition(partition1) .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setErrorMessage("Share coordinator is not available."))))); + .setErrorMessage("Share coordinator is not available.")))); assertEquals(expectedResult, result); } @@ -2249,12 +2249,12 @@ class ShareCoordinatorServiceTest { } private void checkMetrics(Metrics metrics) { - Set<MetricName> usualMetrics = new HashSet<>(List.of( + Set<MetricName> usualMetrics = Set.of( metrics.metricName("write-latency-avg", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-latency-max", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-rate", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-total", ShareCoordinatorMetrics.METRICS_GROUP) - )); + ); usualMetrics.forEach(metric -> assertTrue(metrics.metrics().containsKey(metric))); } diff --git a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/metrics/ShareCoordinatorMetricsTest.java b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/metrics/ShareCoordinatorMetricsTest.java index 6b3c0a6490b..26bd6ddf773 100644 --- a/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/metrics/ShareCoordinatorMetricsTest.java +++ b/share-coordinator/src/test/java/org/apache/kafka/coordinator/share/metrics/ShareCoordinatorMetricsTest.java @@ -26,9 +26,8 @@ import org.apache.kafka.timeline.SnapshotRegistry; import org.junit.jupiter.api.Test; -import java.util.HashSet;
-import java.util.List; import java.util.Map; +import java.util.Set; import static org.apache.kafka.coordinator.share.metrics.ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_LATENCY_SENSOR_NAME; import static org.apache.kafka.coordinator.share.metrics.ShareCoordinatorMetrics.SHARE_COORDINATOR_WRITE_SENSOR_NAME; @@ -42,12 +41,12 @@ public class ShareCoordinatorMetricsTest { public void testMetricNames() { Metrics metrics = new Metrics(); - HashSet<MetricName> expectedMetrics = new HashSet<>(List.of( + Set<MetricName> expectedMetrics = Set.of( metrics.metricName("write-rate", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-total", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-latency-avg", ShareCoordinatorMetrics.METRICS_GROUP), metrics.metricName("write-latency-max", ShareCoordinatorMetrics.METRICS_GROUP) - )); + ); ShareCoordinatorMetrics coordMetrics = new ShareCoordinatorMetrics(metrics); for (MetricName metricName : expectedMetrics) { diff --git a/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java b/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java index 86388b56c94..f7420b45ffb 100644 --- a/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java +++ b/storage/src/test/java/org/apache/kafka/storage/internals/log/ProducerStateManagerTest.java @@ -44,7 +44,6 @@ import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -607,7 +606,7 @@ public class ProducerStateManagerTest { appendClientEntry(stateManager, producerId, epoch, 2, 2L, false); stateManager.takeSnapshot(); assertEquals(2, Objects.requireNonNull(logDir.listFiles()).length); - assertEquals(new HashSet<>(List.of(2L, 3L)), currentSnapshotOffsets()); + assertEquals(Set.of(2L, 3L), currentSnapshotOffsets()); stateManager.deleteSnapshotsBefore(3L); assertEquals(1, Objects.requireNonNull(logDir.listFiles()).length); @@ -629,7 +628,7 @@ public class ProducerStateManagerTest { appendClientEntry(stateManager, producerId, epoch, 2, 2L, false); stateManager.takeSnapshot(); assertEquals(2, Objects.requireNonNull(logDir.listFiles()).length); - assertEquals(new HashSet<>(List.of(2L, 3L)), currentSnapshotOffsets()); + assertEquals(Set.of(2L, 3L), currentSnapshotOffsets()); stateManager.truncateFullyAndStartAt(0L); @@ -655,7 +654,7 @@ public class ProducerStateManagerTest { appendClientEntry(stateManager, producerId, epoch, 4, 4L, false); stateManager.takeSnapshot(); assertEquals(2, Objects.requireNonNull(logDir.listFiles()).length); - assertEquals(new HashSet<>(List.of(3L, 5L)), currentSnapshotOffsets()); + assertEquals(Set.of(3L, 5L), currentSnapshotOffsets()); // Truncate to the range (3, 5), this will delete the earlier snapshot until offset 3.
stateManager.truncateAndReload(3, 5, time.milliseconds()); diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java index 2e4faf6551a..615ebac78c1 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest.java @@ -96,7 +96,7 @@ public class KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest { @Override public Optional<Set<Integer>> partitions(final String topic, final String key, final Void value, final int numPartitions) { - return Optional.of(new HashSet<>(Arrays.asList(0, 1, 2))); + return Optional.of(Set.of(0, 1, 2)); } } diff --git a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java index ca3936633fc..2eb35e826a0 100644 --- a/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java +++ b/streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/QueryableStateIntegrationTest.java @@ -747,7 +747,7 @@ public class QueryableStateIntegrationTest { new KeyValue<>(keys[4], 2L)) ); final Set<KeyValue<String, Long>> expectedBatch1 = - new HashSet<>(Collections.singleton(new KeyValue<>(keys[4], 2L))); + Set.of(new KeyValue<>(keys[4], 2L)); IntegrationTestUtils.produceKeyValuesSynchronously( streamOne, @@ -919,7 +919,7 @@ public class QueryableStateIntegrationTest { new KeyValue<>(keys[4], "2")) ); final Set<KeyValue<String, Long>> expectedBatch1 = - new HashSet<>(Collections.singleton(new KeyValue<>(keys[4], 2L))); + Set.of(new KeyValue<>(keys[4], 2L)); IntegrationTestUtils.produceKeyValuesSynchronously( streamOne, diff --git a/streams/src/main/java/org/apache/kafka/streams/state/QueryableStoreTypes.java b/streams/src/main/java/org/apache/kafka/streams/state/QueryableStoreTypes.java index 800c30e478d..e809d0a8b9e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/QueryableStoreTypes.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/QueryableStoreTypes.java @@ -25,9 +25,7 @@ import org.apache.kafka.streams.state.internals.CompositeReadOnlySessionStore; import org.apache.kafka.streams.state.internals.CompositeReadOnlyWindowStore; import org.apache.kafka.streams.state.internals.StateStoreProvider; -import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; import java.util.Set; /** @@ -129,9 +127,9 @@ public final class QueryableStoreTypes { extends QueryableStoreTypeMatcher<ReadOnlyKeyValueStore<K, ValueAndTimestamp<V>>> { TimestampedKeyValueStoreType() { - super(new HashSet<>(Arrays.asList( + super(Set.of( TimestampedKeyValueStore.class, - ReadOnlyKeyValueStore.class))); + ReadOnlyKeyValueStore.class)); } @Override @@ -158,9 +156,9 @@ public final class QueryableStoreTypes { extends QueryableStoreTypeMatcher<ReadOnlyWindowStore<K, ValueAndTimestamp<V>>> { TimestampedWindowStoreType() { - super(new HashSet<>(Arrays.asList( + super(Set.of( TimestampedWindowStore.class, - ReadOnlyWindowStore.class))); + ReadOnlyWindowStore.class)); } @Override diff --git a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java index f85c1e8ae37..7d172c3c89a 100644 --- a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java @@ -68,7 +68,6 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; import java.time.Duration; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -2275,7 +2274,7 @@ public class TopologyTest { private TopologyDescription.Source addSource(final String sourceName, final String... sourceTopic) { topology.addSource((AutoOffsetReset) null, sourceName, null, null, null, sourceTopic); - return new InternalTopologyBuilder.Source(sourceName, new HashSet<>(Arrays.asList(sourceTopic)), null); + return new InternalTopologyBuilder.Source(sourceName, Set.of(sourceTopic), null); } @SuppressWarnings("deprecation") @@ -2325,7 +2324,7 @@ public class TopologyTest { topology.connectProcessorAndStateStores(processorName, storeNames); } final TopologyDescription.Processor expectedProcessorNode = - new InternalTopologyBuilder.Processor(processorName, new HashSet<>(Arrays.asList(storeNames))); + new InternalTopologyBuilder.Processor(processorName, Set.of(storeNames)); for (final TopologyDescription.Node parent : parents) { ((InternalTopologyBuilder.AbstractNode) parent).addSuccessor(expectedProcessorNode); diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java index d9bd9382642..b7fb8669697 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamJoinTest.java @@ -66,10 +66,8 @@ import org.mockito.Mockito; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Optional; @@ -585,7 +583,7 @@ public class KStreamKStreamJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = @@ -707,7 +705,7 @@ public class KStreamKStreamJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = @@ -830,7 +828,7 @@ public class KStreamKStreamJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new 
TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = @@ -1395,7 +1393,7 @@ public class KStreamKStreamJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = @@ -1662,7 +1660,7 @@ public class KStreamKStreamJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamLeftJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamLeftJoinTest.java index d56abacdcb2..74ebaa0e844 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamLeftJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamLeftJoinTest.java @@ -39,9 +39,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Properties; import java.util.Set; @@ -457,7 +455,7 @@ public class KStreamKStreamLeftJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = @@ -658,7 +656,7 @@ public class KStreamKStreamLeftJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = @@ -821,7 +819,7 @@ public class KStreamKStreamLeftJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = @@ -893,7 +891,7 @@ public class KStreamKStreamLeftJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), 
copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamOuterJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamOuterJoinTest.java index be923f7f3ea..5fc4e0a418b 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamOuterJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKStreamOuterJoinTest.java @@ -49,9 +49,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Properties; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -472,7 +470,7 @@ public class KStreamKStreamOuterJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = @@ -538,7 +536,7 @@ public class KStreamKStreamOuterJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = @@ -666,7 +664,7 @@ public class KStreamKStreamOuterJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = @@ -779,7 +777,7 @@ public class KStreamKStreamOuterJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), PROPS)) { final TestInputTopic inputTopic1 = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java index 1419fd716c2..19a8b5e5205 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableJoinTest.java @@ -46,9 +46,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Properties; import java.util.Random; import java.util.Set; 
@@ -335,7 +333,7 @@ public class KStreamKTableJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(streamTopic, tableTopic)), copartitionGroups.iterator().next()); + assertEquals(Set.of(streamTopic, tableTopic), copartitionGroups.iterator().next()); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableLeftJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableLeftJoinTest.java index 25eafd3043b..f78432b95e7 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableLeftJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamKTableLeftJoinTest.java @@ -40,9 +40,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Properties; import java.util.Random; import java.util.Set; @@ -119,7 +117,7 @@ public class KStreamKTableLeftJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(streamTopic, tableTopic)), copartitionGroups.iterator().next()); + assertEquals(Set.of(streamTopic, tableTopic), copartitionGroups.iterator().next()); } @Test diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java index cb0f14423e1..9d1e7adec95 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableInnerJoinTest.java @@ -42,9 +42,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Properties; import java.util.Set; @@ -374,7 +372,7 @@ public class KTableKTableInnerJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java index aee8b1fb4dd..87a38cc7a44 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableLeftJoinTest.java @@ -50,9 +50,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Locale; import java.util.Properties; import java.util.Random; @@ -93,7 +91,7 @@ public class KTableKTableLeftJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - 
assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java index c6d58d8664c..080cdbd82d1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KTableKTableOuterJoinTest.java @@ -40,9 +40,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; -import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Properties; import java.util.Set; @@ -79,7 +77,7 @@ public class KTableKTableOuterJoinTest { TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups(); assertEquals(1, copartitionGroups.size()); - assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next()); + assertEquals(Set.of(topic1, topic2), copartitionGroups.iterator().next()); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic1 = diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java index 536d2e26ee8..53cdae3638c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopicManagerTest.java @@ -328,7 +328,7 @@ public class InternalTopicManagerTest { final InternalTopicManager internalTopicManager = new InternalTopicManager(time, mockAdminClient, new StreamsConfig(config)); try { - final Set<String> topic1set = new HashSet<>(Collections.singletonList(topic1)); + final Set<String> topic1set = Set.of(topic1); internalTopicManager.getTopicPartitionInfo(topic1set, null); } catch (final TimeoutException expected) { @@ -338,7 +338,7 @@ mockAdminClient.timeoutNextRequest(1); try { - final Set<String> topic2set = new HashSet<>(Collections.singletonList(topic2)); + final Set<String> topic2set = Set.of(topic2); internalTopicManager.getTopicPartitionInfo(topic2set, null); } catch (final TimeoutException expected) { @@ -353,7 +353,7 @@ final InternalTopicManager internalTopicManager = new InternalTopicManager(time, mockAdminClient, new StreamsConfig(config)); try { - final Set<String> topic1set = new HashSet<>(Collections.singletonList(topic1)); + final Set<String> topic1set = Set.of(topic1); final Set<String> topic2set = new HashSet<>(Collections.singletonList(topic2)); internalTopicManager.getNumPartitions(topic1set, topic2set); @@ -365,7 +365,7 @@ mockAdminClient.timeoutNextRequest(1); try { - final Set<String> topic1set = new HashSet<>(Collections.singletonList(topic1)); + final Set<String> topic1set = Set.of(topic1); final Set<String> topic2set = new HashSet<>(Collections.singletonList(topic2)); internalTopicManager.getNumPartitions(topic1set, topic2set); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java index 5341cd25f0d..e9669ac39f4 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java @@ -202,7 +202,7 @@ public class ProcessorNodeTest { throw new TaskCorruptedException(tasksIds, new InvalidOffsetException("Invalid offset") { @Override public Set<TopicPartition> partitions() { - return new HashSet<>(Collections.singletonList(new TopicPartition("topic", 0))); + return Set.of(new TopicPartition("topic", 0)); } }); } diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java index fda9afa9a88..701f38eda0c 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamTaskTest.java @@ -1991,7 +1991,7 @@ public class StreamTaskTest { task = new StreamTask( taskId, - new HashSet<>(List.of(partition1, repartition)), + Set.of(partition1, repartition), topology, consumer, new TopologyConfig(null, config, new Properties()).getTaskConfig(), @@ -2920,7 +2920,7 @@ public class StreamTaskTest { final TaskCorruptedException expectedException = new TaskCorruptedException(tasksIds, new InvalidOffsetException("Invalid offset") { @Override public Set<TopicPartition> partitions() { - return new HashSet<>(Collections.singletonList(new TopicPartition("topic", 0))); + return Set.of(new TopicPartition("topic", 0)); } }); @@ -3084,7 +3084,7 @@ public class StreamTaskTest { return new StreamTask( taskId, - new HashSet<>(List.of(partition1)), + Set.of(partition1), topology, consumer, new TopologyConfig(null, config, new Properties()).getTaskConfig(), @@ -3234,7 +3234,7 @@ public class StreamTaskTest { return new StreamTask( taskId, - new HashSet<>(List.of(partition1)), + Set.of(partition1), topology, consumer, new TopologyConfig(null, config, new Properties()).getTaskConfig(), @@ -3340,7 +3340,7 @@ public class StreamTaskTest { task = new StreamTask( taskId, - new HashSet<>(List.of(partition1)), + Set.of(partition1), topology, consumer, new TopologyConfig(null, config, new Properties()).getTaskConfig(), diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java index b34d1408c56..89989a1d9a4 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsPartitionAssignorTest.java @@ -893,8 +893,8 @@ public class StreamsPartitionAssignorTest { // then metadata gets populated assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment(); // check assigned partitions - assertEquals(Set.of(new HashSet<>(List.of(t1p0, t2p0, t1p0, t2p0, t1p1, t2p1, t1p2, t2p2))), - Set.of(new HashSet<>(assignments.get("consumer10").partitions()))); + assertEquals(Set.of(t1p0, t2p0, t1p1, t2p1, t1p2, t2p2), + new HashSet<>(assignments.get("consumer10").partitions())); // the first consumer info10 = checkAssignment(allTopics, assignments.get("consumer10")); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskExecutionMetadataTest.java
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskExecutionMetadataTest.java index 7b8c352d1a8..5048264ef76 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskExecutionMetadataTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskExecutionMetadataTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.streams.processor.TaskId; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -35,7 +34,7 @@ import static org.mockito.Mockito.when; public class TaskExecutionMetadataTest { static final String TOPOLOGY1 = "topology1"; static final String TOPOLOGY2 = "topology2"; - static final Set<String> NAMED_TOPOLOGIES = new HashSet<>(Arrays.asList(TOPOLOGY1, TOPOLOGY2)); + static final Set<String> NAMED_TOPOLOGIES = Set.of(TOPOLOGY1, TOPOLOGY2); static final int TIME_ZERO = 0; static final int CONSTANT_BACKOFF_MS = 5000; diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/LegacyStickyTaskAssignorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/LegacyStickyTaskAssignorTest.java index 3fcb66501ed..3103e72cd52 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/LegacyStickyTaskAssignorTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/LegacyStickyTaskAssignorTest.java @@ -753,7 +753,7 @@ public class LegacyStickyTaskAssignorTest { public void shouldMoveMinimalNumberOfTasksWhenPreviouslyAboveCapacityAndNewClientAdded(final String rackAwareStrategy) { setUp(rackAwareStrategy); final Set<TaskId> p1PrevTasks = new HashSet<>(List.of(TASK_0_0, TASK_0_2)); - final Set<TaskId> p2PrevTasks = new HashSet<>(List.of(TASK_0_1, TASK_0_3)); + final Set<TaskId> p2PrevTasks = Set.of(TASK_0_1, TASK_0_3); createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_2); createClientWithPreviousActiveTasks(PID_2, 1, TASK_0_1, TASK_0_3); diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/SubscriptionInfoTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/SubscriptionInfoTest.java index 19a94e9e6d9..618fd093c76 100644 --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/SubscriptionInfoTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/SubscriptionInfoTest.java @@ -23,8 +23,6 @@ import org.apache.kafka.streams.processor.internals.Task; import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -58,13 +56,13 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; public class SubscriptionInfoTest { - private static final Set<TaskId> ACTIVE_TASKS = new HashSet<>(Arrays.asList( + private static final Set<TaskId> ACTIVE_TASKS = Set.of( TASK_0_0, TASK_0_1, - TASK_1_0)); + TASK_1_0); - private static final Set<TaskId> STANDBY_TASKS = new HashSet<>(Arrays.asList( + private static final Set<TaskId> STANDBY_TASKS = Set.of( TASK_1_1, - TASK_2_0)); + TASK_2_0); private static final Map<TaskId, Long> TASK_OFFSET_SUMS = mkMap( mkEntry(TASK_0_0, Task.LATEST_OFFSET), mkEntry(TASK_0_1, Task.LATEST_OFFSET), diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java
b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java index 50964febf63..66a896598f1 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBWindowStoreTest.java @@ -33,8 +33,6 @@ import org.junit.jupiter.api.Test; import java.io.File; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -175,7 +173,7 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes // while for TimeOrderedWindowStores, actualFrom = observedStreamTime - retention // expired record assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), @@ -191,7 +189,7 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes // actualFrom = 150000, hence not ignored if (storeType() == StoreType.RocksDBWindowStore) { assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), @@ -199,32 +197,32 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes } else { assertEquals( - new HashSet<>(Collections.singletonList("one")), + Set.of("one"), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); } assertEquals( - new HashSet<>(Collections.singletonList("two")), + Set.of("two"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), @@ -241,13 +239,13 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes ); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), @@ -264,7 +262,7 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes if (storeType() == StoreType.RocksDBWindowStore) { assertEquals( // expired record - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), @@ -272,32 +270,32 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes } else { assertEquals( // expired record - new HashSet<>(Collections.singletonList("two")), + 
Set.of("two"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); } assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("six")), + Set.of("six"), valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), @@ -314,50 +312,50 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes ); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( // expired record - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("six")), + Set.of("six"), valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("seven")), + Set.of("seven"), valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), @@ -374,25 +372,25 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes ); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + 
WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), @@ -400,7 +398,7 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes if (storeType() == StoreType.RocksDBWindowStore) { assertEquals( // expired record - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), @@ -408,7 +406,7 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes } else { assertEquals( // expired record - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), @@ -416,25 +414,25 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes } assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("six")), + Set.of("six"), valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("seven")), + Set.of("seven"), valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("eight")), + Set.of("eight"), valuesToSetAndCloseIterator(windowStore.fetch( 8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), @@ -616,55 +614,55 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes // while for TimeOrderedWindowStores, actualFrom = observedStreamTime - retention assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 5, 
ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), @@ -678,25 +676,25 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes context.restore(STORE_NAME, changeLog); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), @@ -712,14 +710,14 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes // actualFrom = 240,000, hence not ignored if (storeType() == StoreType.RocksDBWindowStore) { assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE)))); } else { assertEquals( - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch( 4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), @@ -727,25 +725,25 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes } assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch( 5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("six")), + Set.of("six"), valuesToSetAndCloseIterator(windowStore.fetch( 6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("seven")), + Set.of("seven"), valuesToSetAndCloseIterator(windowStore.fetch( 7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("eight")), + Set.of("eight"), valuesToSetAndCloseIterator(windowStore.fetch( 8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), @@ -786,7 +784,7 @@ public abstract class AbstractRocksDBWindowStoreTest extends AbstractWindowBytes private Set segmentDirs(final File baseDir) { final File windowDir = new 
File(baseDir, windowStore.name()); - return new HashSet<>(asList(requireNonNull(windowDir.list()))); + return Set.of(requireNonNull(windowDir.list())); } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java index 744f3ebc864..18b7dd89003 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractSessionBytesStoreTest.java @@ -566,43 +566,43 @@ public abstract class AbstractSessionBytesStoreTest { try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("a", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 3L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 3L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("aa", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(2L, 4L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(2L, 4L))); } try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("a", "aa", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("a", "aa", 10, 0) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(Collections.singletonList(2L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(2L))); } try (final KeyValueIterator, Long> iterator = sessionStore.findSessions(null, "aa", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.findSessions("a", null, 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.findSessions(null, null, 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } } @@ -622,43 +622,43 @@ public abstract class AbstractSessionBytesStoreTest { try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("a", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 3L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 3L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("aa", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(2L, 4L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(2L, 4L))); } try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("a", "aa", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("a", "aa", 10, 0) ) { - assertThat(valuesToSet(iterator), equalTo(new 
HashSet<>(Collections.singletonList(2L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(2L))); } try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions(null, "aa", 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions("a", null, 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } try (final KeyValueIterator, Long> iterator = sessionStore.backwardFindSessions(null, null, 0, Long.MAX_VALUE) ) { - assertThat(valuesToSet(iterator), equalTo(new HashSet<>(asList(1L, 2L, 3L, 4L, 5L)))); + assertThat(valuesToSet(iterator), equalTo(Set.of(1L, 2L, 3L, 4L, 5L))); } } @@ -961,11 +961,11 @@ public abstract class AbstractSessionBytesStoreTest { sessionStore.findSessions("a", "b", 0L, Long.MAX_VALUE) ) { if (storeType() == StoreType.InMemoryStore) { - assertEquals(valuesToSet(iterator), new HashSet<>(Arrays.asList(2L, 3L, 4L))); + assertEquals(valuesToSet(iterator), Set.of(2L, 3L, 4L)); } else { // The 2 records with values 2L and 3L are considered expired as // their end times < observed stream time - retentionPeriod + 1. - assertEquals(valuesToSet(iterator), new HashSet<>(Collections.singletonList(4L))); + assertEquals(valuesToSet(iterator), Set.of(4L)); } } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java index 8d2e7e61abd..1486cca8c7e 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractWindowBytesStoreTest.java @@ -129,7 +129,7 @@ public abstract class AbstractWindowBytesStoreTest { putFirstBatch(windowStore, defaultStartTime, context); assertEquals( - new HashSet<>(Collections.singletonList("zero")), + Set.of("zero"), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), @@ -145,91 +145,91 @@ public abstract class AbstractWindowBytesStoreTest { assertEquals("two+6", windowStore.fetch(2, defaultStartTime + 8L)); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime - 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 2L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("two")), + Set.of("two"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime - 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 1L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1")), + Set.of("two", "two+1"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2")), + Set.of("two", "two+1", "two+2"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2", "two+3")), + Set.of("two", "two+1", "two+2", "two+3"), 
valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2", "two+3", "two+4")), + Set.of("two", "two+1", "two+2", "two+3", "two+4"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2", "two+3", "two+4", "two+5")), + Set.of("two", "two+1", "two+2", "two+3", "two+4", "two+5"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2", "two+3", "two+4", "two+5", "two+6")), + Set.of("two", "two+1", "two+2", "two+3", "two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+1", "two+2", "two+3", "two+4", "two+5", "two+6")), + Set.of("two+1", "two+2", "two+3", "two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 6L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 6L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+2", "two+3", "two+4", "two+5", "two+6")), + Set.of("two+2", "two+3", "two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 7L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 7L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+3", "two+4", "two+5", "two+6")), + Set.of("two+3", "two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 8L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 8L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+4", "two+5", "two+6")), + Set.of("two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 9L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 9L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+5", "two+6")), + Set.of("two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 10L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 10L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("two+6")), + Set.of("two+6"), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 11L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 11L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 2, ofEpochMilli(defaultStartTime + 12L - WINDOW_SIZE), @@ -561,70 +561,70 @@ public abstract class AbstractWindowBytesStoreTest { putFirstBatch(windowStore, defaultStartTime, context); assertEquals( - new HashSet<>(Collections.singletonList("zero")), + Set.of("zero"), valuesToSetAndCloseIterator(windowStore.fetch(0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime)))); assertEquals( - new HashSet<>(Collections.singletonList("one")), + Set.of("one"), valuesToSetAndCloseIterator(windowStore.fetch(1, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L)))); assertEquals( - new HashSet<>(Collections.singletonList("two")), + Set.of("two"), 
valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L)))); assertEquals( - new HashSet<>(Collections.singletonList("three")), + Set.of("three"), valuesToSetAndCloseIterator(windowStore.fetch(3, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L)))); assertEquals( - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch(4, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L)))); assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch(5, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L)))); putSecondBatch(windowStore, defaultStartTime); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime - 1L)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 1L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 1L)))); assertEquals( - new HashSet<>(Collections.singletonList("two")), + Set.of("two"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 2L)))); assertEquals( - new HashSet<>(asList("two", "two+1")), + Set.of("two", "two+1"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2")), + Set.of("two", "two+1", "two+2"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 4L)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2", "two+3")), + Set.of("two", "two+1", "two+2", "two+3"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 5L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 5L)))); assertEquals( - new HashSet<>(asList("two+1", "two+2", "two+3", "two+4")), + Set.of("two+1", "two+2", "two+3", "two+4"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 6L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 6L)))); assertEquals( - new HashSet<>(asList("two+2", "two+3", "two+4", "two+5")), + Set.of("two+2", "two+3", "two+4", "two+5"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 7L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 7L)))); assertEquals( - new HashSet<>(asList("two+3", "two+4", "two+5", "two+6")), + Set.of("two+3", "two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 8L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 8L)))); assertEquals( - new HashSet<>(asList("two+4", "two+5", "two+6")), + Set.of("two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 9L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 9L)))); assertEquals( - new HashSet<>(asList("two+5", "two+6")), + Set.of("two+5", "two+6"), 
valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 10L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 10L)))); assertEquals( - new HashSet<>(Collections.singletonList("two+6")), + Set.of("two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 11L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 11L)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 12L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 12L)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 13L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 13L)))); // Flush the store and verify all current entries were properly flushed ... @@ -650,90 +650,90 @@ public abstract class AbstractWindowBytesStoreTest { putFirstBatch(windowStore, defaultStartTime, context); assertEquals( - new HashSet<>(Collections.singletonList("zero")), + Set.of("zero"), valuesToSetAndCloseIterator(windowStore.fetch(0, ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("one")), + Set.of("one"), valuesToSetAndCloseIterator(windowStore.fetch(1, ofEpochMilli(defaultStartTime + 1L), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("two")), + Set.of("two"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(3, ofEpochMilli(defaultStartTime + 3L), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("four")), + Set.of("four"), valuesToSetAndCloseIterator(windowStore.fetch(4, ofEpochMilli(defaultStartTime + 4L), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("five")), + Set.of("five"), valuesToSetAndCloseIterator(windowStore.fetch(5, ofEpochMilli(defaultStartTime + 5L), ofEpochMilli(defaultStartTime + 5L + WINDOW_SIZE)))); putSecondBatch(windowStore, defaultStartTime); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 2L), ofEpochMilli(defaultStartTime - 2L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("two")), + Set.of("two"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime - 1L), ofEpochMilli(defaultStartTime - 1L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1")), + Set.of("two", "two+1"), valuesToSetAndCloseIterator(windowStore .fetch(2, ofEpochMilli(defaultStartTime), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2")), + Set.of("two", "two+1", "two+2"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 1L), ofEpochMilli(defaultStartTime + 1L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two", "two+1", "two+2", "two+3")), + Set.of("two", "two+1", "two+2", "two+3"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 2L), ofEpochMilli(defaultStartTime + 2L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+1", "two+2", 
"two+3", "two+4")), + Set.of("two+1", "two+2", "two+3", "two+4"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 3L), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+2", "two+3", "two+4", "two+5")), + Set.of("two+2", "two+3", "two+4", "two+5"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 4L), ofEpochMilli(defaultStartTime + 4L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+3", "two+4", "two+5", "two+6")), + Set.of("two+3", "two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 5L), ofEpochMilli(defaultStartTime + 5L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+4", "two+5", "two+6")), + Set.of("two+4", "two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 6L), ofEpochMilli(defaultStartTime + 6L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(asList("two+5", "two+6")), + Set.of("two+5", "two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 7L), ofEpochMilli(defaultStartTime + 7L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.singletonList("two+6")), + Set.of("two+6"), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 8L), ofEpochMilli(defaultStartTime + 8L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 9L), ofEpochMilli(defaultStartTime + 9L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 10L), ofEpochMilli(defaultStartTime + 10L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 11L), ofEpochMilli(defaultStartTime + 11L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch(2, ofEpochMilli(defaultStartTime + 12L), ofEpochMilli(defaultStartTime + 12L + WINDOW_SIZE)))); @@ -767,7 +767,7 @@ public abstract class AbstractWindowBytesStoreTest { windowStore.put(0, "zero", defaultStartTime); assertEquals( - new HashSet<>(Collections.singletonList("zero")), + Set.of("zero"), valuesToSetAndCloseIterator(windowStore.fetch(0, ofEpochMilli(defaultStartTime - WINDOW_SIZE), ofEpochMilli(defaultStartTime + WINDOW_SIZE)))); @@ -800,7 +800,7 @@ public abstract class AbstractWindowBytesStoreTest { ofEpochMilli(defaultStartTime + 3L - WINDOW_SIZE), ofEpochMilli(defaultStartTime + 3L + WINDOW_SIZE)))); assertEquals( - new HashSet<>(Collections.emptyList()), + Set.of(), valuesToSetAndCloseIterator(windowStore.fetch( 0, ofEpochMilli(defaultStartTime + 4L - WINDOW_SIZE), @@ -852,7 +852,7 @@ public abstract class AbstractWindowBytesStoreTest { windowStore.put("aa", "0004", 1); windowStore.put("a", "0005", 0x7a00000000000000L - 1); - final Set expected = new HashSet<>(asList("0001", "0003", "0005")); + final Set expected = Set.of("0001", "0003", "0005"); assertThat( valuesToSetAndCloseIterator(windowStore.fetch("a", ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expected) @@ -862,20 +862,20 @@ public abstract class AbstractWindowBytesStoreTest { toSet(windowStore.fetch("a", "a", ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); assertThat( set, - 
equalTo(new HashSet<>(asList( + equalTo(Set.of( windowedPair("a", "0001", 0, windowSize), windowedPair("a", "0003", 1, windowSize), windowedPair("a", "0005", 0x7a00000000000000L - 1, windowSize) - ))) + )) ); set = toSet(windowStore.fetch("aa", "aa", ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))); assertThat( set, - equalTo(new HashSet<>(asList( + equalTo(Set.of( windowedPair("aa", "0002", 0, windowSize), windowedPair("aa", "0004", 1, windowSize) - ))) + )) ); windowStore.close(); } @@ -934,17 +934,17 @@ public abstract class AbstractWindowBytesStoreTest { windowStore.put(key2, "8", 59999); windowStore.put(key3, "9", 59999); - final Set expectedKey1 = new HashSet<>(asList("1", "4", "7")); + final Set expectedKey1 = Set.of("1", "4", "7"); assertThat( valuesToSetAndCloseIterator(windowStore.fetch(key1, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expectedKey1) ); - final Set expectedKey2 = new HashSet<>(asList("2", "5", "8")); + final Set expectedKey2 = Set.of("2", "5", "8"); assertThat( valuesToSetAndCloseIterator(windowStore.fetch(key2, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expectedKey2) ); - final Set expectedKey3 = new HashSet<>(asList("3", "6", "9")); + final Set expectedKey3 = Set.of("3", "6", "9"); assertThat( valuesToSetAndCloseIterator(windowStore.fetch(key3, ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE))), equalTo(expectedKey3) diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java index 1775d76e101..63506acd9ad 100644 --- a/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/InMemorySessionStoreTest.java @@ -22,8 +22,7 @@ import org.apache.kafka.streams.state.KeyValueIterator; import org.junit.jupiter.api.Test; -import java.util.Arrays; -import java.util.HashSet; +import java.util.Set; import static org.apache.kafka.test.StreamsTestUtils.valuesToSet; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -48,7 +47,7 @@ public class InMemorySessionStoreTest extends AbstractSessionBytesStoreTest { // Advance stream time to expire the first three record sessionStore.put(new Windowed<>("aa", new SessionWindow(100, 2 * RETENTION_PERIOD)), 4L); - assertEquals(valuesToSet(iterator), new HashSet<>(Arrays.asList(1L, 2L, 3L, 4L))); + assertEquals(valuesToSet(iterator), Set.of(1L, 2L, 3L, 4L)); assertFalse(iterator.hasNext()); iterator.close(); diff --git a/tools/src/main/java/org/apache/kafka/tools/DelegationTokenCommand.java b/tools/src/main/java/org/apache/kafka/tools/DelegationTokenCommand.java index cb0a6e76182..e5ee54d1722 100644 --- a/tools/src/main/java/org/apache/kafka/tools/DelegationTokenCommand.java +++ b/tools/src/main/java/org/apache/kafka/tools/DelegationTokenCommand.java @@ -37,12 +37,11 @@ import org.apache.kafka.server.util.CommandLineUtils; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.ArrayList; -import java.util.Arrays; import java.util.Base64; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Properties; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.stream.Stream; @@ -299,10 +298,10 @@ public class DelegationTokenCommand { } // check invalid args - CommandLineUtils.checkInvalidArgs(parser, options, createOpt, new HashSet<>(Arrays.asList(hmacOpt, 
renewTimePeriodOpt, expiryTimePeriodOpt)));
-        CommandLineUtils.checkInvalidArgs(parser, options, renewOpt, new HashSet<>(Arrays.asList(renewPrincipalsOpt, maxLifeTimeOpt, expiryTimePeriodOpt, ownerPrincipalsOpt)));
-        CommandLineUtils.checkInvalidArgs(parser, options, expiryOpt, new HashSet<>(Arrays.asList(renewOpt, maxLifeTimeOpt, renewTimePeriodOpt, ownerPrincipalsOpt)));
-        CommandLineUtils.checkInvalidArgs(parser, options, describeOpt, new HashSet<>(Arrays.asList(renewTimePeriodOpt, maxLifeTimeOpt, hmacOpt, renewTimePeriodOpt, expiryTimePeriodOpt)));
+        CommandLineUtils.checkInvalidArgs(parser, options, createOpt, Set.of(hmacOpt, renewTimePeriodOpt, expiryTimePeriodOpt));
+        CommandLineUtils.checkInvalidArgs(parser, options, renewOpt, Set.of(renewPrincipalsOpt, maxLifeTimeOpt, expiryTimePeriodOpt, ownerPrincipalsOpt));
+        CommandLineUtils.checkInvalidArgs(parser, options, expiryOpt, Set.of(renewOpt, maxLifeTimeOpt, renewTimePeriodOpt, ownerPrincipalsOpt));
+        CommandLineUtils.checkInvalidArgs(parser, options, describeOpt, Set.of(renewTimePeriodOpt, maxLifeTimeOpt, hmacOpt, expiryTimePeriodOpt));
     }
   }
 }
diff --git a/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java b/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java
index 4eee239069c..2caece3b69b 100644
--- a/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java
+++ b/tools/src/main/java/org/apache/kafka/tools/TopicCommand.java
@@ -820,8 +820,8 @@ public abstract class TopicCommand {
                 .ofType(java.lang.Integer.class);
             options = parser.parse(args);
-            allTopicLevelOpts = new HashSet<>(Arrays.asList(alterOpt, createOpt, describeOpt, listOpt, deleteOpt));
-            allReplicationReportOpts = new HashSet<>(Arrays.asList(reportUnderReplicatedPartitionsOpt, reportUnderMinIsrPartitionsOpt, reportAtMinIsrPartitionsOpt, reportUnavailablePartitionsOpt));
+            allTopicLevelOpts = Set.of(alterOpt, createOpt, describeOpt, listOpt, deleteOpt);
+            allReplicationReportOpts = Set.of(reportUnderReplicatedPartitionsOpt, reportUnderMinIsrPartitionsOpt, reportAtMinIsrPartitionsOpt, reportUnavailablePartitionsOpt);
             checkArgs();
         }
@@ -980,8 +980,8 @@ public abstract class TopicCommand {
             if (!has(listOpt) && !has(describeOpt))
                 CommandLineUtils.checkRequiredArgs(parser, options, topicOpt);
             if (has(alterOpt)) {
-                Set<OptionSpec<?>> usedOptions = new HashSet<>(Arrays.asList(bootstrapServerOpt, configOpt));
-                Set<OptionSpec<?>> invalidOptions = new HashSet<>(Arrays.asList(alterOpt));
+                Set<OptionSpec<?>> usedOptions = Set.of(bootstrapServerOpt, configOpt);
+                Set<OptionSpec<?>> invalidOptions = Set.of(alterOpt);
                 CommandLineUtils.checkInvalidArgsSet(parser, options, usedOptions, invalidOptions, Optional.of(KAFKA_CONFIGS_CLI_SUPPORTS_ALTERING_TOPIC_CONFIGS));
                 CommandLineUtils.checkRequiredArgs(parser, options, partitionsOpt);
             }
diff --git a/tools/src/test/java/org/apache/kafka/tools/ConfigCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/ConfigCommandTest.java
index 91e61bf9542..fc0add1022b 100644
--- a/tools/src/test/java/org/apache/kafka/tools/ConfigCommandTest.java
+++ b/tools/src/test/java/org/apache/kafka/tools/ConfigCommandTest.java
@@ -639,11 +639,11 @@ public class ConfigCommandTest {
         List<String> deleteArgs = Arrays.asList("--delete-config", "connection_creation_rate");
-        Set<ClientQuotaAlteration.Op> deleteAlterationOps = new HashSet<>(Collections.singletonList(new ClientQuotaAlteration.Op("connection_creation_rate", null)));
+        Set<ClientQuotaAlteration.Op> deleteAlterationOps = Set.of(new ClientQuotaAlteration.Op("connection_creation_rate", null));
         Map<String, Double> propsToDelete = Collections.singletonMap("connection_creation_rate", 50.0);
List addArgs = Arrays.asList("--add-config", "connection_creation_rate=100"); - Set addAlterationOps = new HashSet<>(Collections.singletonList(new ClientQuotaAlteration.Op("connection_creation_rate", 100.0))); + Set addAlterationOps = Set.of(new ClientQuotaAlteration.Op("connection_creation_rate", 100.0)); verifyAlterQuotas( concat(singleIpArgsAndEntity.getKey(), deleteArgs), @@ -672,11 +672,11 @@ public class ConfigCommandTest { "--delete-config", "request_percentage"); Map propsToDelete = Collections.singletonMap("request_percentage", 50.0); - Set alterationOps = new HashSet<>(Arrays.asList( + Set alterationOps = Set.of( new ClientQuotaAlteration.Op("consumer_byte_rate", 20000d), new ClientQuotaAlteration.Op("producer_byte_rate", 10000d), new ClientQuotaAlteration.Op("request_percentage", null) - )); + ); Entry, Map> userArgsAndEntity = argsAndExpectedEntity(Optional.ofNullable(user), ClientQuotaEntity.USER); Entry, Map> clientArgsAndEntry = argsAndExpectedEntity(Optional.ofNullable(client), ClientQuotaEntity.CLIENT_ID); @@ -844,11 +844,11 @@ public class ConfigCommandTest { assertEquals(ConfigResource.Type.TOPIC, entry.getKey().type()); assertEquals(3, alterConfigOps.size()); - Set expectedConfigOps = new HashSet<>(Arrays.asList( + Set expectedConfigOps = Set.of( new AlterConfigOp(newConfigEntry("delete.retention.ms", "1000000"), AlterConfigOp.OpType.SET), new AlterConfigOp(newConfigEntry("min.insync.replicas", "2"), AlterConfigOp.OpType.SET), new AlterConfigOp(newConfigEntry("unclean.leader.election.enable", ""), AlterConfigOp.OpType.DELETE) - )); + ); assertEquals(expectedConfigOps.size(), alterConfigOps.size()); expectedConfigOps.forEach(expectedOp -> { Optional actual = alterConfigOps.stream() diff --git a/tools/src/test/java/org/apache/kafka/tools/LogDirsCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/LogDirsCommandTest.java index 8407c1fe9ff..b17491cf582 100644 --- a/tools/src/test/java/org/apache/kafka/tools/LogDirsCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/LogDirsCommandTest.java @@ -183,7 +183,7 @@ public class LogDirsCommandTest { add((Integer) ((HashMap) brokersInformation.get(1)).get("broker")); }}; assertEquals(2, brokersInformation.size()); - assertEquals(new HashSet<>(Arrays.asList(2, 1)), brokerIds); + assertEquals(Set.of(2, 1), brokerIds); } } diff --git a/tools/src/test/java/org/apache/kafka/tools/MetadataQuorumCommandUnitTest.java b/tools/src/test/java/org/apache/kafka/tools/MetadataQuorumCommandUnitTest.java index a1f669d0aa6..df88ad2cbc8 100644 --- a/tools/src/test/java/org/apache/kafka/tools/MetadataQuorumCommandUnitTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/MetadataQuorumCommandUnitTest.java @@ -29,10 +29,10 @@ import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; import java.util.Arrays; -import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Properties; +import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -226,9 +226,9 @@ public class MetadataQuorumCommandUnitTest { Properties props = new Properties(); props.setProperty("controller.listener.names", "CONTROLLER,CONTROLLER2"); props.setProperty("listeners", "CONTROLLER://example.com:9092,CONTROLLER2://:9093"); - assertEquals(new HashSet<>(Arrays.asList( + assertEquals(Set.of( new RaftVoterEndpoint("CONTROLLER", "example.com", 9092), - new RaftVoterEndpoint("CONTROLLER2", "localhost", 9093))), + new 
RaftVoterEndpoint("CONTROLLER2", "localhost", 9093)), MetadataQuorumCommand.getControllerAdvertisedListeners(props)); } @@ -238,9 +238,9 @@ public class MetadataQuorumCommandUnitTest { props.setProperty("controller.listener.names", "CONTROLLER,CONTROLLER2"); props.setProperty("listeners", "CONTROLLER://:9092,CONTROLLER2://:9093"); props.setProperty("advertised.listeners", "CONTROLLER://example.com:9092,CONTROLLER2://example.com:9093"); - assertEquals(new HashSet<>(Arrays.asList( + assertEquals(Set.of( new RaftVoterEndpoint("CONTROLLER", "example.com", 9092), - new RaftVoterEndpoint("CONTROLLER2", "example.com", 9093))), + new RaftVoterEndpoint("CONTROLLER2", "example.com", 9093)), MetadataQuorumCommand.getControllerAdvertisedListeners(props)); } diff --git a/tools/src/test/java/org/apache/kafka/tools/StreamsResetterTest.java b/tools/src/test/java/org/apache/kafka/tools/StreamsResetterTest.java index f8dbd687dfd..2425c21fe90 100644 --- a/tools/src/test/java/org/apache/kafka/tools/StreamsResetterTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/StreamsResetterTest.java @@ -49,7 +49,7 @@ public class StreamsResetterTest { private final StreamsResetter streamsResetter = new StreamsResetter(); private final MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); private final TopicPartition topicPartition = new TopicPartition(TOPIC, 0); - private final Set inputTopicPartitions = new HashSet<>(Collections.singletonList(topicPartition)); + private final Set inputTopicPartitions = Set.of(topicPartition); @BeforeEach public void beforeEach() { diff --git a/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java index dcfe861b757..530773f2371 100644 --- a/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/TopicCommandTest.java @@ -73,6 +73,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -259,7 +260,7 @@ public class TopicCommandTest { .configs(Collections.emptyMap()); verify(adminClient, times(1)).createTopics( - eq(new HashSet<>(Arrays.asList(expectedNewTopic))), + eq(Set.of(expectedNewTopic)), argThat(exception -> !exception.shouldRetryOnQuotaViolation()) ); } diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java index 4bb61bfc48d..bb4ae726789 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ConsumerGroupServiceTest.java @@ -134,8 +134,8 @@ public class ConsumerGroupServiceTest { endOffsets.put(testTopicPartition4, KafkaFuture.completedFuture(resultInfo)); endOffsets.put(testTopicPartition5, KafkaFuture.completedFuture(resultInfo)); - Set assignedTopicPartitions = new HashSet<>(Arrays.asList(testTopicPartition0, testTopicPartition1, testTopicPartition2)); - Set unassignedTopicPartitions = new HashSet<>(Arrays.asList(testTopicPartition3, testTopicPartition4, testTopicPartition5)); + Set assignedTopicPartitions = Set.of(testTopicPartition0, testTopicPartition1, testTopicPartition2); + Set unassignedTopicPartitions = Set.of(testTopicPartition3, 
testTopicPartition4, testTopicPartition5); ConsumerGroupDescription consumerGroupDescription = new ConsumerGroupDescription(GROUP, true, diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/DescribeConsumerGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/DescribeConsumerGroupTest.java index 43c248cc9b6..3e023267f9b 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/DescribeConsumerGroupTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/DescribeConsumerGroupTest.java @@ -53,7 +53,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -910,7 +909,7 @@ public class DescribeConsumerGroupTest { String group = GROUP_PREFIX + groupProtocol.name(); createTopic(topic, 2); - try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, new HashSet<>(Arrays.asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1))), Collections.emptyMap()); + try (AutoCloseable protocolConsumerGroupExecutor = consumerGroupClosable(GroupProtocol.CLASSIC, group, Set.of(new TopicPartition(topic, 0), new TopicPartition(topic, 1)), Collections.emptyMap()); ConsumerGroupCommand.ConsumerGroupService service = consumerGroupService(new String[]{"--bootstrap-server", clusterInstance.bootstrapServers(), "--describe", "--group", group}) ) { TestUtils.waitForCondition(() -> { diff --git a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ShareGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ShareGroupCommandTest.java index 546cab50e0d..b7f59f1a991 100644 --- a/tools/src/test/java/org/apache/kafka/tools/consumer/group/ShareGroupCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/consumer/group/ShareGroupCommandTest.java @@ -120,7 +120,7 @@ public class ShareGroupCommandTest { when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(result); try (ShareGroupService service = getShareGroupService(cgcArgs, adminClient)) { - Set expectedGroups = new HashSet<>(Arrays.asList(firstGroup, secondGroup)); + Set expectedGroups = Set.of(firstGroup, secondGroup); final Set[] foundGroups = new Set[]{Set.of()}; TestUtils.waitForCondition(() -> { @@ -145,13 +145,13 @@ public class ShareGroupCommandTest { ))); when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates); try (ShareGroupService service = getShareGroupService(cgcArgs, adminClient)) { - Set expectedListing = new HashSet<>(Arrays.asList( + Set expectedListing = Set.of( new GroupListing(firstGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE)), - new GroupListing(secondGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.EMPTY)))); + new GroupListing(secondGroup, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.EMPTY))); final Set[] foundListing = new Set[]{Set.of()}; TestUtils.waitForCondition(() -> { - foundListing[0] = new HashSet<>(service.listShareGroupsInStates(new HashSet<>(Arrays.asList(GroupState.values())))); + foundListing[0] = new HashSet<>(service.listShareGroupsInStates(Set.of(GroupState.values()))); return Objects.equals(expectedListing, foundListing[0]); }, "Expected to show groups " + expectedListing + ", but found " + foundListing[0]); diff --git 
a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandTest.java index a96bccd36ed..1ee55c6eace 100644 --- a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsCommandTest.java @@ -749,7 +749,7 @@ public class ReassignPartitionsCommandTest { waitForVerifyAssignment(admin, assignment, true, new VerifyAssignmentResult(partStates, true, Collections.emptyMap(), false)); // Cancel the reassignment. - assertEquals(new AbstractMap.SimpleImmutableEntry<>(new HashSet<>(asList(foo0, baz1)), Collections.emptySet()), runCancelAssignment(assignment, true, useBootstrapServer)); + assertEquals(new AbstractMap.SimpleImmutableEntry<>(Set.of(foo0, baz1), Collections.emptySet()), runCancelAssignment(assignment, true, useBootstrapServer)); // Broker throttles are still active because we passed --preserve-throttles waitForInterBrokerThrottle(admin, asList(0, 1, 2, 3), interBrokerThrottle); // Cancelling the reassignment again should reveal nothing to cancel. diff --git a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java index 6dd087dc50f..77799ae0cf0 100644 --- a/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java +++ b/tools/src/test/java/org/apache/kafka/tools/reassign/ReassignPartitionsUnitTest.java @@ -42,11 +42,11 @@ import java.util.AbstractMap.SimpleImmutableEntry; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -184,7 +184,7 @@ public class ReassignPartitionsUnitTest { // Cancel the reassignment and test findPartitionReassignmentStates again. 
Map cancelResult = cancelPartitionReassignments(adminClient, - new HashSet<>(asList(new TopicPartition("foo", 0), new TopicPartition("quux", 2)))); + Set.of(new TopicPartition("foo", 0), new TopicPartition("quux", 2))); assertEquals(1, cancelResult.size()); assertEquals(UnknownTopicOrPartitionException.class, cancelResult.get(new TopicPartition("quux", 2)).getClass()); @@ -297,13 +297,13 @@ public class ReassignPartitionsUnitTest { assignments.put(new TopicPartition("bar", 0), asList(2, 3, 0)); assertEquals(assignments, - getReplicaAssignmentForPartitions(adminClient, new HashSet<>(asList(new TopicPartition("foo", 0), new TopicPartition("bar", 0))))); + getReplicaAssignmentForPartitions(adminClient, Set.of(new TopicPartition("foo", 0), new TopicPartition("bar", 0)))); UnknownTopicOrPartitionException exception = assertInstanceOf(UnknownTopicOrPartitionException.class, assertThrows(ExecutionException.class, () -> getReplicaAssignmentForPartitions(adminClient, - new HashSet<>(asList(new TopicPartition("foo", 0), new TopicPartition("foo", 10))))).getCause()); + Set.of(new TopicPartition("foo", 0), new TopicPartition("foo", 10)))).getCause()); assertEquals("Unable to find partition: foo-10", exception.getMessage()); } } @@ -509,16 +509,16 @@ public class ReassignPartitionsUnitTest { Map fooMoves = new HashMap<>(); - fooMoves.put(0, new PartitionMove(new HashSet<>(asList(1, 2, 3)), new HashSet<>(asList(5)))); - fooMoves.put(1, new PartitionMove(new HashSet<>(asList(4, 5, 6)), new HashSet<>(asList(7, 8)))); - fooMoves.put(2, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(3, 4)))); - fooMoves.put(3, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(5, 6)))); - fooMoves.put(4, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(3)))); - fooMoves.put(5, new PartitionMove(new HashSet<>(asList(1, 2)), new HashSet<>(asList(3, 4, 5, 6)))); + fooMoves.put(0, new PartitionMove(Set.of(1, 2, 3), Set.of(5))); + fooMoves.put(1, new PartitionMove(Set.of(4, 5, 6), Set.of(7, 8))); + fooMoves.put(2, new PartitionMove(Set.of(1, 2), Set.of(3, 4))); + fooMoves.put(3, new PartitionMove(Set.of(1, 2), Set.of(5, 6))); + fooMoves.put(4, new PartitionMove(Set.of(1, 2), Set.of(3))); + fooMoves.put(5, new PartitionMove(Set.of(1, 2), Set.of(3, 4, 5, 6))); Map barMoves = new HashMap<>(); - barMoves.put(0, new PartitionMove(new HashSet<>(asList(2, 3, 4)), new HashSet<>(asList(1)))); + barMoves.put(0, new PartitionMove(Set.of(2, 3, 4), Set.of(1))); assertEquals(fooMoves, moveMap.get("foo")); assertEquals(barMoves, moveMap.get("bar")); @@ -537,10 +537,10 @@ public class ReassignPartitionsUnitTest { assertEquals(expFollowerThrottle, calculateFollowerThrottles(moveMap)); - assertEquals(new HashSet<>(asList(1, 2, 3, 4, 5, 6, 7, 8)), calculateReassigningBrokers(moveMap)); - assertEquals(new HashSet<>(asList(0, 2)), calculateMovingBrokers(new HashSet<>(asList( + assertEquals(Set.of(1, 2, 3, 4, 5, 6, 7, 8), calculateReassigningBrokers(moveMap)); + assertEquals(Set.of(0, 2), calculateMovingBrokers(Set.of( new TopicPartitionReplica("quux", 0, 0), - new TopicPartitionReplica("quux", 1, 2))))); + new TopicPartitionReplica("quux", 1, 2)))); } @Test @@ -626,8 +626,8 @@ public class ReassignPartitionsUnitTest { @Test public void testModifyBrokerInterBrokerThrottle() throws Exception { try (MockAdminClient adminClient = new MockAdminClient.Builder().numBrokers(4).build()) { - modifyInterBrokerThrottle(adminClient, new HashSet<>(asList(0, 1, 2)), 1000); - 
-            modifyInterBrokerThrottle(adminClient, new HashSet<>(asList(0, 3)), 100);
+            modifyInterBrokerThrottle(adminClient, Set.of(0, 1, 2), 1000);
+            modifyInterBrokerThrottle(adminClient, Set.of(0, 3), 100);
             List<ConfigResource> brokers = new ArrayList<>();
             for (int i = 0; i < 4; i++)
                 brokers.add(new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(i)));
@@ -642,8 +642,8 @@ public class ReassignPartitionsUnitTest {
     @Test
     public void testModifyLogDirThrottle() throws Exception {
         try (MockAdminClient adminClient = new MockAdminClient.Builder().numBrokers(4).build()) {
-            modifyLogDirThrottle(adminClient, new HashSet<>(asList(0, 1, 2)), 2000);
-            modifyLogDirThrottle(adminClient, new HashSet<>(asList(0, 3)), -1);
+            modifyLogDirThrottle(adminClient, Set.of(0, 1, 2), 2000);
+            modifyLogDirThrottle(adminClient, Set.of(0, 3), -1);
             List<ConfigResource> brokers = new ArrayList<>();
 
             for (int i = 0; i < 4; i++)
@@ -747,7 +747,7 @@ public class ReassignPartitionsUnitTest {
             assignment.put(new TopicPartitionReplica("quux", 1, 0), "/tmp/kafka-logs1");
 
             assertEquals(
-                new HashSet<>(asList(new TopicPartitionReplica("foo", 0, 0))),
+                Set.of(new TopicPartitionReplica("foo", 0, 0)),
                 alterReplicaLogDirs(adminClient, assignment)
             );
         }
diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/ListStreamsGroupTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/ListStreamsGroupTest.java
index 4a2dcac8608..dee821f71c1 100644
--- a/tools/src/test/java/org/apache/kafka/tools/streams/ListStreamsGroupTest.java
+++ b/tools/src/test/java/org/apache/kafka/tools/streams/ListStreamsGroupTest.java
@@ -102,7 +102,7 @@ public class ListStreamsGroupTest {
     @Test
     public void testListStreamsGroupWithoutFilters() throws Exception {
         try (StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(new String[]{"--bootstrap-server", cluster.bootstrapServers(), "--list"})) {
-            Set<String> expectedGroups = new HashSet<>(Collections.singleton(APP_ID));
+            Set<String> expectedGroups = Set.of(APP_ID);
 
             final AtomicReference<Set<String>> foundGroups = new AtomicReference<>();
             TestUtils.waitForCondition(() -> {
diff --git a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java
index 14c9bedfb88..b30e19f5f60 100644
--- a/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java
+++ b/tools/src/test/java/org/apache/kafka/tools/streams/StreamsGroupCommandTest.java
@@ -86,7 +86,7 @@ public class StreamsGroupCommandTest {
         )));
         when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(result);
         StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs, adminClient);
-        Set<String> expectedGroups = new HashSet<>(Arrays.asList(firstGroup, secondGroup));
+        Set<String> expectedGroups = Set.of(firstGroup, secondGroup);
 
         final Set[] foundGroups = new Set[]{Set.of()};
         TestUtils.waitForCondition(() -> {
@@ -122,13 +122,13 @@ public class StreamsGroupCommandTest {
         )));
         when(adminClient.listGroups(any(ListGroupsOptions.class))).thenReturn(resultWithAllStates);
         StreamsGroupCommand.StreamsGroupService service = getStreamsGroupService(cgcArgs, adminClient);
-        Set<GroupListing> expectedListing = new HashSet<>(Arrays.asList(
+        Set<GroupListing> expectedListing = Set.of(
             new GroupListing(firstGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)),
-            new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY))));
+            new GroupListing(secondGroup, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.EMPTY)));
 
         final Set[] foundListing = new Set[]{Set.of()};
         TestUtils.waitForCondition(() -> {
-            foundListing[0] = new HashSet<>(service.listStreamsGroupsInStates(new HashSet<>(Arrays.asList(GroupState.values()))));
+            foundListing[0] = new HashSet<>(service.listStreamsGroupsInStates(Set.of(GroupState.values())));
             return Objects.equals(expectedListing, foundListing[0]);
         }, "Expected to show groups " + expectedListing + ", but found " + foundListing[0]);
@@ -275,7 +275,7 @@ public class StreamsGroupCommandTest {
         assertNotNull(internalTopics.get(groupId));
         assertEquals(4, internalTopics.get(groupId).size());
-        assertEquals(new HashSet<>(List.of(groupId + "-1-changelog", groupId + "-2-changelog", groupId + "-1-repartition", groupId + "-2-repartition")),
+        assertEquals(Set.of(groupId + "-1-changelog", groupId + "-2-changelog", groupId + "-1-repartition", groupId + "-2-repartition"),
             new HashSet<>(internalTopics.get(groupId)));
         assertFalse(internalTopics.get(groupId).stream().anyMatch(List.of("some-pre-fix-changelog", groupId + "-some-thing")::contains));
         assertFalse(internalTopics.get(groupId).stream().anyMatch(sourceTopics::contains));
@@ -344,34 +344,34 @@ public class StreamsGroupCommandTest {
     @Test
     public void testGroupStatesFromString() {
         Set<GroupState> result = StreamsGroupCommand.groupStatesFromString("empty");
-        assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result);
+        assertEquals(Set.of(GroupState.EMPTY), result);
 
         result = StreamsGroupCommand.groupStatesFromString("EMPTY");
-        assertEquals(new HashSet<>(List.of(GroupState.EMPTY)), result);
+        assertEquals(Set.of(GroupState.EMPTY), result);
 
         result = StreamsGroupCommand.groupStatesFromString("notready");
-        assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result);
+        assertEquals(Set.of(GroupState.NOT_READY), result);
 
         result = StreamsGroupCommand.groupStatesFromString("notReady");
-        assertEquals(new HashSet<>(List.of(GroupState.NOT_READY)), result);
+        assertEquals(Set.of(GroupState.NOT_READY), result);
 
         result = StreamsGroupCommand.groupStatesFromString("assigning");
-        assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result);
+        assertEquals(Set.of(GroupState.ASSIGNING), result);
 
         result = StreamsGroupCommand.groupStatesFromString("ASSIGNING");
-        assertEquals(new HashSet<>(List.of(GroupState.ASSIGNING)), result);
+        assertEquals(Set.of(GroupState.ASSIGNING), result);
 
         result = StreamsGroupCommand.groupStatesFromString("RECONCILING");
-        assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result);
+        assertEquals(Set.of(GroupState.RECONCILING), result);
 
         result = StreamsGroupCommand.groupStatesFromString("reconCILING");
-        assertEquals(new HashSet<>(List.of(GroupState.RECONCILING)), result);
+        assertEquals(Set.of(GroupState.RECONCILING), result);
 
         result = StreamsGroupCommand.groupStatesFromString("STABLE");
-        assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result);
+        assertEquals(Set.of(GroupState.STABLE), result);
 
         result = StreamsGroupCommand.groupStatesFromString("stable");
-        assertEquals(new HashSet<>(List.of(GroupState.STABLE)), result);
+        assertEquals(Set.of(GroupState.STABLE), result);
 
         result = StreamsGroupCommand.groupStatesFromString("DEAD");
-        assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result);
+        assertEquals(Set.of(GroupState.DEAD), result);
 
         result = StreamsGroupCommand.groupStatesFromString("dead");
-        assertEquals(new HashSet<>(List.of(GroupState.DEAD)), result);
+        assertEquals(Set.of(GroupState.DEAD), result);
 
         assertThrow("preparingRebalance");
assertThrow("completingRebalance"); @@ -386,7 +386,7 @@ public class StreamsGroupCommandTest { } private static void assertThrow(final String wrongState) { - final Set validStates = new HashSet<>(Arrays.asList("Assigning", "Dead", "Empty", "Reconciling", "Stable", "NotReady")); + final Set validStates = Set.of("Assigning", "Dead", "Empty", "Reconciling", "Stable", "NotReady"); final Exception exception = assertThrows(IllegalArgumentException.class, () -> StreamsGroupCommand.groupStatesFromString(wrongState)); diff --git a/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java b/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java index 0464db269a1..4ece85846ad 100644 --- a/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java +++ b/trogdor/src/test/java/org/apache/kafka/trogdor/common/StringExpanderTest.java @@ -20,9 +20,8 @@ package org.apache.kafka.trogdor.common; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; +import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -38,38 +37,38 @@ public class StringExpanderTest { @Test public void testExpansions() { - HashSet expected1 = new HashSet<>(Arrays.asList( + Set expected1 = Set.of( "foo1", "foo2", "foo3" - )); + ); assertEquals(expected1, StringExpander.expand("foo[1-3]")); - HashSet expected2 = new HashSet<>(Arrays.asList( + Set expected2 = Set.of( "foo bar baz 0" - )); + ); assertEquals(expected2, StringExpander.expand("foo bar baz [0-0]")); - HashSet expected3 = new HashSet<>(Arrays.asList( + Set expected3 = Set.of( "[[ wow50 ]]", "[[ wow51 ]]", "[[ wow52 ]]" - )); + ); assertEquals(expected3, StringExpander.expand("[[ wow[50-52] ]]")); - HashSet expected4 = new HashSet<>(Arrays.asList( + Set expected4 = Set.of( "foo1bar", "foo2bar", "foo3bar" - )); + ); assertEquals(expected4, StringExpander.expand("foo[1-3]bar")); // should expand latest range first - HashSet expected5 = new HashSet<>(Arrays.asList( + Set expected5 = Set.of( "start[1-3]middle1epilogue", "start[1-3]middle2epilogue", "start[1-3]middle3epilogue" - )); + ); assertEquals(expected5, StringExpander.expand("start[1-3]middle[1-3]epilogue")); } }