mirror of https://github.com/apache/kafka.git
MINOR: Cleanup JMH-Benchmarks Module (#19791)
Now that Kafka supports Java 17, this PR makes some cleanups in the jmh-benchmarks module. The changes mostly include:

- Collections.emptyList(), Collections.singletonList(), and Arrays.asList() are replaced with List.of()
- Collections.emptyMap() and Collections.singletonMap() are replaced with Map.of()
- Collections.singleton() is replaced with Set.of()

Reviewers: Jhen-Yung Hsu <jhenyunghsu@gmail.com>, Ken Huang <s7133700@gmail.com>, Chia-Ping Tsai <chia7712@gmail.com>
Parent: 28c53ba09a
Commit: 220ff4f774
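The whole cleanup follows one pattern: the pre-Java-9 helpers from Collections and Arrays are swapped for the Java 9+ immutable collection factories. An illustrative before/after sketch (class and variable names are made up for illustration, not lines from this patch):

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    class CollectionFactoryCleanupExample {
        void before() {
            // Old helpers used throughout the benchmarks
            List<String> topics = Collections.singletonList("topic-a");
            Map<String, Object> config = Collections.emptyMap();
            Set<Integer> brokerIds = Collections.singleton(1);
        }

        void after() {
            // Java 9+ factories: results are unmodifiable and reject null elements
            List<String> topics = List.of("topic-a");
            Map<String, Object> config = Map.of();
            Set<Integer> brokerIds = Set.of(1);
        }
    }

The factories throw NullPointerException on null elements and return unmodifiable collections, so each rewritten call site is assumed not to rely on mutability or null entries.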
@@ -54,7 +54,6 @@ import org.openjdk.jmh.annotations.Warmup;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -100,7 +99,7 @@ public class AuthorizerBenchmark {
 // most map entries. In such cases, we rely on the filtering based on `String.startsWith`
 // to return the matching ACLs. Using a more efficient data structure (e.g. a prefix
 // tree) should improve performance significantly.
-actions = Collections.singletonList(new Action(AclOperation.WRITE,
+actions = List.of(new Action(AclOperation.WRITE,
 new ResourcePattern(ResourceType.TOPIC, resourceNamePrefix + 95, PatternType.LITERAL),
 1, true, true));
 authorizeContext = new RequestContext(new RequestHeader(ApiKeys.PRODUCE, Integer.valueOf(1).shortValue(),
@@ -47,7 +47,6 @@ import java.util.List;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;

 import static org.apache.kafka.common.acl.AclOperation.READ;
@@ -98,7 +97,7 @@ public class StandardAuthorizerUpdateBenchmark {
 return aclsForResource(resourcePattern);
 })
 .flatMap(Collection::stream)
-.collect(Collectors.toList());
+.toList();
 }

 private List<StandardAclWithId> aclsForResource(ResourcePattern pattern) {
@@ -109,7 +108,7 @@ public class StandardAuthorizerUpdateBenchmark {
 return new StandardAcl(pattern.resourceType(), pattern.name(), pattern.patternType(), p, h, READ, ALLOW);
 })
 .map(this::withId)
-.collect(Collectors.toList());
+.toList();
 }

 private StandardAclWithId withId(StandardAcl acl) {
@@ -35,8 +35,6 @@ import org.apache.kafka.image.MetadataImage;
 import org.apache.kafka.image.MetadataProvenance;

 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -145,7 +143,7 @@ public class AssignorBenchmarkUtils {
 return new GroupSpecImpl(
 memberSpecs,
 subscriptionType,
-Collections.emptyMap()
+Map.of()
 );
 }

@@ -335,7 +333,7 @@ public class AssignorBenchmarkUtils {
 delta.replay(new PartitionRecord()
 .setTopicId(topicId)
 .setPartitionId(i)
-.setReplicas(Arrays.asList(i % 4, (i + 1) % 4)));
+.setReplicas(List.of(i % 4, (i + 1) % 4)));
 }
 }
 }
@@ -40,11 +40,11 @@ import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;

 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;

 import static org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor.DEFAULT_GENERATION;
@@ -161,7 +161,7 @@ public class ClientSideAssignorBenchmark {
 partitions.addAll(partitionInfos(topicName, partitionsPerTopicCount, nodes));
 }

-metadata = new Cluster("test-cluster", nodes, partitions, Collections.emptySet(), Collections.emptySet());
+metadata = new Cluster("test-cluster", nodes, partitions, Set.of(), Set.of());
 }

 private void addMemberSubscriptions() {
@@ -230,7 +230,7 @@ public class ClientSideAssignorBenchmark {
 return new ConsumerPartitionAssignor.Subscription(
 topics,
 null,
-Collections.emptyList(),
+List.of(),
 DEFAULT_GENERATION,
 rackId
 );
@@ -48,7 +48,6 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;

-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -121,7 +120,7 @@ public class ServerSideAssignorBenchmark {

 private GroupSpec groupSpec;

-private List<String> allTopicNames = Collections.emptyList();
+private List<String> allTopicNames = List.of();

 private MetadataImage metadataImage = MetadataImage.EMPTY;

@@ -196,14 +195,14 @@ public class ServerSideAssignorBenchmark {
 for (String memberId : groupSpec.memberIds()) {
 MemberAssignment memberAssignment = members.getOrDefault(
 memberId,
-new MemberAssignmentImpl(Collections.emptyMap())
+new MemberAssignmentImpl(Map.of())
 );

 updatedMemberSpec.put(memberId, new MemberSubscriptionAndAssignmentImpl(
 groupSpec.memberSubscription(memberId).rackId(),
 Optional.empty(),
 groupSpec.memberSubscription(memberId).subscribedTopicIds(),
-new Assignment(Collections.unmodifiableMap(memberAssignment.partitions()))
+new Assignment(Map.copyOf(memberAssignment.partitions()))
 ));
 }

@@ -47,7 +47,6 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;

-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -114,7 +113,7 @@ public class ShareGroupAssignorBenchmark {

 private GroupSpec groupSpec;

-private List<String> allTopicNames = Collections.emptyList();
+private List<String> allTopicNames = List.of();

 private TopicIds.TopicResolver topicResolver;

@@ -179,14 +178,14 @@ public class ShareGroupAssignorBenchmark {
 for (String memberId : groupSpec.memberIds()) {
 MemberAssignment memberAssignment = members.getOrDefault(
 memberId,
-new MemberAssignmentImpl(Collections.emptyMap())
+new MemberAssignmentImpl(Map.of())
 );

 updatedMemberSpec.put(memberId, new MemberSubscriptionAndAssignmentImpl(
 groupSpec.memberSubscription(memberId).rackId(),
 Optional.empty(),
 groupSpec.memberSubscription(memberId).subscribedTopicIds(),
-new Assignment(Collections.unmodifiableMap(memberAssignment.partitions()))
+new Assignment(Map.copyOf(memberAssignment.partitions()))
 ));
 }

@@ -45,7 +45,6 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;

-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -90,7 +89,7 @@ public class TargetAssignmentBuilderBenchmark {

 private Map<Uuid, Map<Integer, String>> invertedTargetAssignment;

-private List<String> allTopicNames = Collections.emptyList();
+private List<String> allTopicNames = List.of();

 private MetadataImage metadataImage;

@@ -38,12 +38,10 @@ import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.OptionsBuilder;

-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;

 @State(Scope.Benchmark)
@@ -66,14 +64,14 @@ public class MetadataResponseBenchmark {
 new TopicPartition("benchmark", 42),
 Optional.of(4),
 Optional.of(42),
-IntStream.range(0, nodes).boxed().collect(Collectors.toList()),
-IntStream.range(0, nodes).filter(i1 -> i1 % 3 != 0).boxed().collect(Collectors.toList()),
-IntStream.range(0, nodes).filter(i2 -> i2 % 3 == 0).boxed().collect(Collectors.toList()));
+IntStream.range(0, nodes).boxed().toList(),
+IntStream.range(0, nodes).filter(i1 -> i1 % 3 != 0).boxed().toList(),
+IntStream.range(0, nodes).filter(i2 -> i2 % 3 == 0).boxed().toList());
 nodesById = new HashMap<>(nodes);
 for (int i = 0; i < nodes; i++) {
 nodesById.put(i, new Node(i, "localhost", 1234));
 }
-nodesById = Collections.unmodifiableMap(nodesById);
+nodesById = Map.copyOf(nodesById);
 }

 @Benchmark
@@ -36,7 +36,7 @@ import org.openjdk.jmh.infra.BenchmarkParams;
 import org.openjdk.jmh.infra.Blackhole;

 import java.nio.charset.Charset;
-import java.util.Collections;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;

 @State(Scope.Benchmark)
@@ -426,7 +426,7 @@ public class JsonConverterBenchmark {
 public void setup(BenchmarkParams params) {

 converter = new JsonConverter(Boolean.parseBoolean(params.getParam("blackbirdModule")));
-converter.configure(Collections.emptyMap(), false);
+converter.configure(Map.of(), false);
 }

 @Benchmark
@@ -38,9 +38,8 @@ import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;

 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.function.BiFunction;
 import java.util.function.Function;
@@ -106,12 +105,12 @@ public class ValuesBenchmark {
 new SchemaAndValue(Schema.STRING_SCHEMA, "{\"1\": 2, \"3\": 4}"),
 new SchemaAndValue(SchemaBuilder.array(Schema.INT16_SCHEMA), new short[]{1, 2, 3}),
 new SchemaAndValue(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.BOOLEAN_SCHEMA),
-Collections.singletonMap("key", true)),
+Map.of("key", true)),
 new SchemaAndValue(STRUCT_SCHEMA, new Struct(STRUCT_SCHEMA)
 .put("first", 1)
 .put("second", "foo")
-.put("array", Arrays.asList(1, 2, 3))
-.put("map", Collections.singletonMap(1, "value"))
+.put("array", List.of(1, 2, 3))
+.put("map", Map.of(1, "value"))
 .put("nested", new Struct(FLAT_STRUCT_SCHEMA).put("field", 12))),
 };

@@ -72,14 +72,14 @@ public class TestPurgatoryPerformance {
 new DelayedOperationPurgatory<>("fake purgatory", 0, 1000);
 CompletionQueue queue = new CompletionQueue();

-List<String> gcNames = gcMXBeans.stream().map(MemoryManagerMXBean::getName).collect(Collectors.toList());
+List<String> gcNames = gcMXBeans.stream().map(MemoryManagerMXBean::getName).toList();
 CountDownLatch latch = new CountDownLatch(numRequests);
 long initialCpuTimeNano = getProcessCpuTimeNanos(osMXBean).orElseThrow();
 long start = System.currentTimeMillis();
 Random rand = new Random();
 List<FakeOperationKey> keys = IntStream.range(0, numKeys)
 .mapToObj(i -> new FakeOperationKey(format("fakeKey%d", rand.nextInt(numPossibleKeys))))
-.collect(Collectors.toList());
+.toList();

 AtomicLong requestArrivalTime = new AtomicLong(start);
 AtomicLong end = new AtomicLong(0);
@@ -313,7 +313,7 @@ public class TestPurgatoryPerformance {
 }

 public void printStats() {
-List<Long> samples = this.samples.stream().sorted().collect(Collectors.toList());
+List<Long> samples = this.samples.stream().sorted().toList();

 long p75 = samples.get((int) (samples.size() * 0.75d));
 long p50 = samples.get((int) (samples.size() * 0.5d));
@@ -82,8 +82,6 @@ import org.openjdk.jmh.annotations.Warmup;

 import java.io.File;
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -131,7 +129,7 @@ public class ReplicaFetcherThreadBenchmark {
 List<File> logDirs = config.logDirs().stream().map(File::new).toList();
 logManager = new LogManagerBuilder().
 setLogDirs(logDirs).
-setInitialOfflineDirs(Collections.emptyList()).
+setInitialOfflineDirs(List.of()).
 setConfigRepository(new MockConfigRepository()).
 setInitialDefaultConfig(logConfig).
 setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false)).
@@ -165,7 +163,7 @@ public class ReplicaFetcherThreadBenchmark {
 for (int i = 0; i < partitionCount; i++) {
 TopicPartition tp = new TopicPartition("topic", i);

-List<Integer> replicas = Arrays.asList(0, 1, 2);
+List<Integer> replicas = List.of(0, 1, 2);
 PartitionState partitionState = new PartitionState()
 .setLeader(0)
 .setLeaderEpoch(0)
@@ -82,8 +82,6 @@ import org.openjdk.jmh.annotations.TearDown;
 import org.openjdk.jmh.annotations.Warmup;

 import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 import java.util.Properties;
@@ -141,7 +139,7 @@ public class KRaftMetadataRequestBenchmark {
 MetadataDelta buildupMetadataDelta = new MetadataDelta(MetadataImage.EMPTY);
 IntStream.range(0, 5).forEach(brokerId -> {
 RegisterBrokerRecord.BrokerEndpointCollection endpoints = new RegisterBrokerRecord.BrokerEndpointCollection();
-endpoints(brokerId).forEach(endpoint -> endpoints.add(endpoint));
+endpoints.addAll(endpoints(brokerId));
 buildupMetadataDelta.replay(new RegisterBrokerRecord().
 setBrokerId(brokerId).
 setBrokerEpoch(100L).
@@ -157,10 +155,10 @@ public class KRaftMetadataRequestBenchmark {
 buildupMetadataDelta.replay(new PartitionRecord().
 setPartitionId(partitionId).
 setTopicId(topicId).
-setReplicas(Arrays.asList(0, 1, 3)).
-setIsr(Arrays.asList(0, 1, 3)).
-setRemovingReplicas(Collections.emptyList()).
-setAddingReplicas(Collections.emptyList()).
+setReplicas(List.of(0, 1, 3)).
+setIsr(List.of(0, 1, 3)).
+setRemovingReplicas(List.of()).
+setAddingReplicas(List.of()).
 setLeader(partitionCount % 5).
 setLeaderEpoch(0)));
 });
@@ -168,7 +166,7 @@ public class KRaftMetadataRequestBenchmark {
 }

 private List<RegisterBrokerRecord.BrokerEndpoint> endpoints(final int brokerId) {
-return Collections.singletonList(
+return List.of(
 new RegisterBrokerRecord.BrokerEndpoint().
 setHost("host_" + brokerId).
 setPort(9092).
@@ -37,7 +37,7 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Warmup;

 import java.util.ArrayList;
-import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.TimeUnit;

 @State(Scope.Benchmark)
@@ -77,8 +77,8 @@ public class TopicsImageSingleRecordChangeBenchmark {
 setTopicId(newTopicUuid).
 setReplicas(replicas).
 setIsr(isr).
-setRemovingReplicas(Collections.emptyList()).
-setAddingReplicas(Collections.emptyList()).
+setRemovingReplicas(List.of()).
+setAddingReplicas(List.of()).
 setLeader(0);
 topicsDelta.replay(newPartitionRecord);
 System.out.print("(Adding a single topic to metadata having " + totalTopicCount + " total topics) ");
@@ -37,7 +37,7 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Warmup;

 import java.util.ArrayList;
-import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.IntStream;
@@ -84,8 +84,8 @@ public class TopicsImageSnapshotLoadBenchmark {
 setTopicId(topicId).
 setReplicas(replicas).
 setIsr(isr).
-setRemovingReplicas(Collections.emptyList()).
-setAddingReplicas(Collections.emptyList()).
+setRemovingReplicas(List.of()).
+setAddingReplicas(List.of()).
 setLeader(currentLeader.get()));
 currentLeader.set((1 + currentLeader.get()) % numBrokers);
 });
@@ -36,12 +36,10 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Warmup;

 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;

 @State(Scope.Benchmark)
 @Fork(value = 1)
@@ -75,16 +73,16 @@ public class TopicsImageZonalOutageBenchmark {
 Set<Uuid> perturbedTopics = new HashSet<>();
 builtupTopicsImage.topicsById().forEach((topicId, topicImage) ->
 topicImage.partitions().forEach((partitionNumber, partitionRegistration) -> {
-List<Integer> newIsr = Arrays.stream(partitionRegistration.isr).boxed().filter(n -> n != 0).collect(Collectors.toList());
+List<Integer> newIsr = Arrays.stream(partitionRegistration.isr).boxed().filter(n -> n != 0).toList();
 if (newIsr.size() < replicationFactor) {
 perturbedTopics.add(topicId);
 topicsDelta.replay(new PartitionRecord().
 setPartitionId(partitionNumber).
 setTopicId(topicId).
-setReplicas(Arrays.stream(partitionRegistration.replicas).boxed().collect(Collectors.toList())).
+setReplicas(Arrays.stream(partitionRegistration.replicas).boxed().toList()).
 setIsr(newIsr).
-setRemovingReplicas(Collections.emptyList()).
-setAddingReplicas(Collections.emptyList()).
+setRemovingReplicas(List.of()).
+setAddingReplicas(List.of()).
 setLeader(newIsr.get(0)));
 }
 })
@@ -58,8 +58,6 @@ import org.openjdk.jmh.annotations.Warmup;
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 import java.util.Properties;
@@ -80,7 +78,7 @@ import scala.jdk.javaapi.OptionConverters;
 public class PartitionMakeFollowerBenchmark {
 private final File logDir = new File(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
 private final KafkaScheduler scheduler = new KafkaScheduler(1, true, "scheduler");
-private final List<Integer> replicas = Arrays.asList(0, 1, 2);
+private final List<Integer> replicas = List.of(0, 1, 2);
 private final OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
 private final DelayedOperations delayedOperations = Mockito.mock(DelayedOperations.class);
 private final ExecutorService executorService = Executors.newSingleThreadExecutor();
@@ -99,8 +97,8 @@ public class PartitionMakeFollowerBenchmark {
 BrokerTopicStats brokerTopicStats = new BrokerTopicStats(false);
 LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
 logManager = new LogManagerBuilder().
-setLogDirs(Collections.singletonList(logDir)).
-setInitialOfflineDirs(Collections.emptyList()).
+setLogDirs(List.of(logDir)).
+setInitialOfflineDirs(List.of()).
 setConfigRepository(new MockConfigRepository()).
 setInitialDefaultConfig(logConfig).
 setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false)).
@@ -55,7 +55,6 @@ import org.openjdk.jmh.annotations.Warmup;

 import java.io.File;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 import java.util.Properties;
@@ -89,8 +88,8 @@ public class UpdateFollowerFetchStateBenchmark {
 scheduler.startup();
 LogConfig logConfig = createLogConfig();
 logManager = new LogManagerBuilder().
-setLogDirs(Collections.singletonList(logDir)).
-setInitialOfflineDirs(Collections.emptyList()).
+setLogDirs(List.of(logDir)).
+setInitialOfflineDirs(List.of()).
 setConfigRepository(new MockConfigRepository()).
 setInitialDefaultConfig(logConfig).
 setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false)).
@@ -37,10 +37,8 @@ import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.Warmup;

 import java.nio.charset.StandardCharsets;
-import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;

 @State(Scope.Benchmark)
@@ -52,15 +50,15 @@ import java.util.stream.IntStream;
 public class ProducerRequestBenchmark {
 private static final int NUMBER_OF_PARTITIONS = 3;
 private static final int NUMBER_OF_RECORDS = 3;
-private static final List<ProduceRequestData.TopicProduceData> TOPIC_PRODUCE_DATA = Collections.singletonList(new ProduceRequestData.TopicProduceData()
+private static final List<ProduceRequestData.TopicProduceData> TOPIC_PRODUCE_DATA = List.of(new ProduceRequestData.TopicProduceData()
 .setTopicId(Uuid.fromString("4NeOmt3TH3vW7AKKORPaCW"))
 .setPartitionData(IntStream.range(0, NUMBER_OF_PARTITIONS).mapToObj(partitionIndex -> new ProduceRequestData.PartitionProduceData()
 .setIndex(partitionIndex)
 .setRecords(MemoryRecords.withRecords(Compression.NONE, IntStream.range(0, NUMBER_OF_RECORDS)
 .mapToObj(recordIndex -> new SimpleRecord(100, "hello0".getBytes(StandardCharsets.UTF_8)))
-.collect(Collectors.toList())
+.toList()
 .toArray(new SimpleRecord[0]))))
-.collect(Collectors.toList()))
+.toList())
 );
 private static final ProduceRequestData PRODUCE_REQUEST_DATA = new ProduceRequestData()
 .setTimeoutMs(100)
@@ -60,7 +60,7 @@ public class ProducerResponseBenchmark {
 0,
 IntStream.range(0, NUMBER_OF_RECORDS)
 .mapToObj(ProduceResponse.RecordError::new)
-.collect(Collectors.toList()))
+.toList())
 ))
 .collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey, AbstractMap.SimpleEntry::getValue));

@@ -60,7 +60,6 @@ import org.openjdk.jmh.annotations.Warmup;

 import java.io.File;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 import java.util.Properties;
@@ -123,7 +122,7 @@ public class PartitionCreationBench {
 ConfigRepository configRepository = new MockConfigRepository();
 this.logManager = new LogManagerBuilder().
 setLogDirs(files).
-setInitialOfflineDirs(Collections.emptyList()).
+setInitialOfflineDirs(List.of()).
 setConfigRepository(configRepository).
 setInitialDefaultConfig(createLogConfig()).
 setCleanerConfig(cleanerConfig).
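Two of the substitutions above are not pure renames. Collections.unmodifiableMap(m) wraps the original map, while Map.copyOf(m) takes a detached snapshot and also rejects null keys and values; and Stream.toList() (Java 16+) returns an unmodifiable list, whereas Collectors.toList() makes no guarantee about the returned list's mutability. A minimal sketch of the difference (hypothetical values, not code from this patch):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.IntStream;

    class CopyVsViewExample {
        void run() {
            Map<Integer, String> source = new HashMap<>();
            source.put(0, "broker-0");

            Map<Integer, String> view = Collections.unmodifiableMap(source); // view: reflects later changes to source
            Map<Integer, String> copy = Map.copyOf(source);                  // snapshot: detached and null-hostile

            source.put(1, "broker-1");
            // view.size() == 2, copy.size() == 1

            List<Integer> ids = IntStream.range(0, 3).boxed().toList();      // unmodifiable list
        }
    }

Code that relied on seeing later mutations through the wrapped map, or on mutating the collected list, would behave differently after such a swap; the change assumes the touched benchmark call sites build these collections once and only read them afterwards.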