floorEntry = cachedState.floorEntry(baseOffset);
- // We might find a batch with floor entry but not necessarily that batch has an overlap,
- // if the request batch base offset is ahead of last offset from floor entry i.e. cached
- // batch of 10-14 and request batch of 15-18, though floor entry is found but no overlap.
- // Such scenario will be handled in the next step when considering the subMap. However,
- // if the floor entry is found and the request batch base offset is within the floor entry
- // then adjust the base offset to the floor entry so that acquire method can still work on
- // previously cached batch boundaries.
- if (floorEntry != null && floorEntry.getValue().lastOffset() >= baseOffset) {
+ if (floorEntry == null) {
+ // The initialize method checks that there cannot be any batches prior to the start offset.
+ // Once the share partition starts fetching records, it always fetches from at least the
+ // start offset, but the fetched batch's base offset can still be prior to the start offset.
+ // This can happen when the share partition is initialized with partial persisted state and
+ // a moved start offset i.e. the start offset is not the batch's first offset. In such cases,
+ // adjust the base offset to the start offset. The adjustment is safe when there is no floor
+ // entry i.e. no cached batches exist prior to the request batch's base offset. Hence, check
+ // for the floor entry and adjust the base offset accordingly.
+ if (baseOffset < startOffset) {
+ log.info("Adjusting base offset for the fetch as it's prior to start offset: {}-{}"
+ + " from {} to {}", groupId, topicIdPartition, baseOffset, startOffset);
+ baseOffset = startOffset;
+ }
+ } else if (floorEntry.getValue().lastOffset() >= baseOffset) {
+ // A floor entry may exist without overlapping the request batch, when the request batch's
+ // base offset is ahead of the floor entry's last offset e.g. with a cached batch of 10-14
+ // and a request batch of 15-18 the floor entry is found but there is no overlap. Such
+ // scenarios are handled in the next step when considering the subMap. However, if the
+ // floor entry is found and the request batch's base offset falls within the floor entry,
+ // then adjust the base offset to the floor entry's key so that the acquire method can
+ // still work on previously cached batch boundaries.
baseOffset = floorEntry.getKey();
}
// Validate if the fetch records are already part of existing batches and if available.
@@ -743,10 +769,10 @@ public class SharePartition {
}
InFlightBatch inFlightBatch = entry.getValue();
- // If the initialReadGapOffset window is active, we need to treat the gaps in between the window as
+ // If the persister read gap window is active, we need to treat the gaps within the window as
// acquirable. Once the window is inactive (when we have acquired all the gaps inside the window),
// the remaining gaps are natural (data does not exist at those offsets) and we need not acquire them.
- if (isInitialReadGapOffsetWindowActive()) {
+ if (isPersisterReadGapWindowActive()) {
// If nextBatchStartOffset is less than the key of the entry, this means the fetch happened for a gap in the cachedState.
// Thus, a new batch needs to be acquired for the gap.
if (maybeGapStartOffset < entry.getKey()) {
@@ -755,7 +781,8 @@ public class SharePartition {
result.addAll(shareAcquiredRecords.acquiredRecords());
acquiredCount += shareAcquiredRecords.count();
}
- // Set nextBatchStartOffset as the last offset of the current in-flight batch + 1
+ // Set maybeGapStartOffset to the last offset of the current in-flight batch + 1 so that
+ // the next gap, if any, can be considered in the subsequent loop iteration.
maybeGapStartOffset = inFlightBatch.lastOffset() + 1;
// If the acquired count is equal to the max fetch records then break the loop.
if (acquiredCount >= maxRecordsToAcquire) {
@@ -831,7 +858,7 @@ public class SharePartition {
acquiredCount += shareAcquiredRecords.count();
}
if (!result.isEmpty()) {
- maybeUpdateReadGapFetchOffset(result.get(result.size() - 1).lastOffset() + 1);
+ maybeUpdatePersisterGapWindowStartOffset(result.get(result.size() - 1).lastOffset() + 1);
return maybeFilterAbortedTransactionalAcquiredRecords(fetchPartitionData, isolationLevel, new ShareAcquiredRecords(result, acquiredCount));
}
return new ShareAcquiredRecords(result, acquiredCount);
@@ -1057,10 +1084,24 @@ public class SharePartition {
/**
* Updates the cached state, start and end offsets of the share partition as per the new log
* start offset. The method is called when the log start offset is moved for the share partition.
+ *
+ * This method only archives the available records in the cached state that are before the new log
+ * start offset. It does not persist the archived state batches to the persister; rather, it
+ * updates the cached state and offsets to reflect the new log start offset. The state in the
+ * persister will be updated lazily during the acknowledge/release records API calls or on
+ * acquisition lock timeout.
+ *
+ * The AVAILABLE state records may or may not have an ongoing state transition. Hence, the archive
+ * records method updates the state of such records to ARCHIVED and sets the terminal state flag,
+ * so if the transition is rolled back the state will not become AVAILABLE again. However,
+ * the ACQUIRED state records are not archived as they are still in-flight, and the acknowledge
+ * method also does not allow the state update for any offsets post the log start offset, hence
+ * those records will only be archived once the acquisition lock timeout occurs.
*
* @param logStartOffset The new log start offset.
*/
void updateCacheAndOffsets(long logStartOffset) {
+ log.debug("Updating cached states for share partition: {}-{} with new log start offset: {}",
+ groupId, topicIdPartition, logStartOffset);
lock.writeLock().lock();
try {
if (logStartOffset <= startOffset) {
@@ -1202,11 +1243,11 @@ public class SharePartition {
}
inFlightBatch.maybeInitializeOffsetStateUpdate();
}
- isAnyOffsetArchived = isAnyOffsetArchived || archivePerOffsetBatchRecords(inFlightBatch, startOffset, endOffset - 1, initialState);
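+ // Invoke the archive method first so that it is always evaluated and not skipped by the
+ // short-circuiting || operator when a previous iteration has already set the flag.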
+ isAnyOffsetArchived = archivePerOffsetBatchRecords(inFlightBatch, startOffset, endOffset - 1, initialState) || isAnyOffsetArchived;
continue;
}
// The in-flight batch is a full match hence change the state of the complete batch.
- isAnyBatchArchived = isAnyBatchArchived || archiveCompleteBatch(inFlightBatch, initialState);
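+ // As above, invoke the archive method first so the call is not short-circuited.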
+ isAnyBatchArchived = archiveCompleteBatch(inFlightBatch, initialState) || isAnyBatchArchived;
}
return isAnyOffsetArchived || isAnyBatchArchived;
} finally {
@@ -1428,16 +1469,20 @@ public class SharePartition {
}
// Method to reduce the window that tracks gaps in the cachedState
- private void maybeUpdateReadGapFetchOffset(long offset) {
+ private void maybeUpdatePersisterGapWindowStartOffset(long offset) {
lock.writeLock().lock();
try {
- if (initialReadGapOffset != null) {
- if (initialReadGapOffset.endOffset() == endOffset) {
- initialReadGapOffset.gapStartOffset(offset);
+ if (persisterReadResultGapWindow != null) {
+ // When the last cached batch of the persister's read gap window is acquired, the endOffset
+ // is the same as the gap window's endOffset, but the gap offset passed to this method call
+ // is endOffset + 1. Hence, do not update the gap start offset if the requested offset
+ // is ahead of the endOffset.
+ if (persisterReadResultGapWindow.endOffset() == endOffset && offset <= persisterReadResultGapWindow.endOffset()) {
+ persisterReadResultGapWindow.gapStartOffset(offset);
} else {
- // The initial read gap offset is not valid anymore as the end offset has moved
- // beyond the initial read gap offset. Hence, reset the initial read gap offset.
- initialReadGapOffset = null;
+ // The persister's read gap window is not valid anymore as the end offset has moved
+ // beyond the read gap window's endOffset. Hence, set the gap window to null.
+ persisterReadResultGapWindow = null;
}
}
} finally {
@@ -1445,6 +1490,15 @@ public class SharePartition {
}
}
+ /**
+ * The method calculates the last offset and the maximum number of records to acquire. The
+ * adjustment ensures that the acquired records do not exceed the maximum in-flight messages limit.
+ *
+ * @param fetchOffset The offset from which the records are fetched.
+ * @param maxFetchRecords The maximum number of records to acquire.
+ * @param lastOffset The last offset to acquire records to, which is the last offset of the fetched batch.
+ * @return A LastOffsetAndMaxRecords object containing the last offset to acquire and the maximum number of records to acquire.
+ */
private LastOffsetAndMaxRecords lastOffsetAndMaxRecordsToAcquire(long fetchOffset, int maxFetchRecords, long lastOffset) {
// There can always be records fetched exceeding the max in-flight messages limit. Hence,
// we need to check if the share partition has reached the max in-flight messages limit
@@ -1512,6 +1566,20 @@ public class SharePartition {
// which falls under the max messages limit. As the max fetch records is the soft
// limit, the last offset can be higher than the max messages.
lastAcquiredOffset = lastOffsetFromBatchWithRequestOffset(batches, firstAcquiredOffset + maxFetchRecords - 1);
+ // If the persister's read gap window is active then it's not guaranteed that the batches
+ // align on batch boundaries. Hence, reset to the last offset itself if the batch's last
+ // offset is greater than the last offset for acquisition, else the acquired batch could
+ // overlap with a batch in the persister's read gap window. For example, say the gap window
+ // is 10-30 i.e. the gapWindow's startOffset is 10 and endOffset is 30, and the first batch
+ // read from the persister is 15-30. If the first batch fetched from the log is 10-30 and
+ // maxFetchRecords is 1, then the lastOffset in this method call would be 14. As
+ // maxFetchRecords is less than the batch size, the last offset of the batch containing the
+ // request offset is fetched, which in this example is 30. Hence, when the gap window is
+ // active, the last acquired offset should be adjusted to 14 instead of 30.
+ if (isPersisterReadGapWindowActive() && lastAcquiredOffset > lastOffset) {
+ lastAcquiredOffset = lastOffset;
+ }
}
// Create batches of acquired records.
@@ -1528,7 +1596,7 @@ public class SharePartition {
if (lastAcquiredOffset > endOffset) {
endOffset = lastAcquiredOffset;
}
- maybeUpdateReadGapFetchOffset(lastAcquiredOffset + 1);
+ maybeUpdatePersisterGapWindowStartOffset(lastAcquiredOffset + 1);
return new ShareAcquiredRecords(acquiredRecords, (int) (lastAcquiredOffset - firstAcquiredOffset + 1));
} finally {
lock.writeLock().unlock();
@@ -2135,15 +2203,15 @@ public class SharePartition {
// If the lastOffsetAcknowledged is equal to the last offset of entry, then the entire batch can potentially be removed.
if (lastOffsetAcknowledged == entry.getValue().lastOffset()) {
startOffset = cachedState.higherKey(lastOffsetAcknowledged);
- if (isInitialReadGapOffsetWindowActive()) {
+ if (isPersisterReadGapWindowActive()) {
// This case will arise if we have a situation where there is an acquirable gap after the lastOffsetAcknowledged.
// Ex, the cachedState has following state batches -> {(0, 10), (11, 20), (31,40)} and all these batches are acked.
- // There is a gap from 21 to 30. Let the initialReadGapOffset.gapStartOffset be 21. In this case,
+ // There is a gap from 21 to 30. Let the gapWindow's gapStartOffset be 21. In this case,
// lastOffsetAcknowledged will be 20, but we cannot simply move the start offset to the first offset
// of next cachedState batch (next cachedState batch is 31 to 40). There is an acquirable gap in between (21 to 30)
- // and The startOffset should be at 21. Hence, we set startOffset to the minimum of initialReadGapOffset.gapStartOffset
+ // and the startOffset should be at 21. Hence, we set startOffset to the minimum of gapWindow's gapStartOffset
// and higher key of lastOffsetAcknowledged
- startOffset = Math.min(initialReadGapOffset.gapStartOffset(), startOffset);
+ startOffset = Math.min(persisterReadResultGapWindow.gapStartOffset(), startOffset);
}
lastKeyToRemove = entry.getKey();
} else {
@@ -2208,8 +2276,8 @@ public class SharePartition {
return isRecordStateAcknowledged(startOffsetState);
}
- private boolean isInitialReadGapOffsetWindowActive() {
- return initialReadGapOffset != null && initialReadGapOffset.endOffset() == endOffset;
+ private boolean isPersisterReadGapWindowActive() {
+ return persisterReadResultGapWindow != null && persisterReadResultGapWindow.endOffset() == endOffset;
}
/**
@@ -2232,7 +2300,7 @@ public class SharePartition {
for (NavigableMap.Entry<Long, InFlightBatch> entry : cachedState.entrySet()) {
InFlightBatch inFlightBatch = entry.getValue();
- if (isInitialReadGapOffsetWindowActive() && inFlightBatch.lastOffset() >= initialReadGapOffset.gapStartOffset()) {
+ if (isPersisterReadGapWindowActive() && inFlightBatch.lastOffset() >= persisterReadResultGapWindow.gapStartOffset()) {
return lastOffsetAcknowledged;
}
@@ -2797,8 +2865,8 @@ public class SharePartition {
}
// Visible for testing
- InitialReadGapOffset initialReadGapOffset() {
- return initialReadGapOffset;
+ GapWindow persisterReadResultGapWindow() {
+ return persisterReadResultGapWindow;
}
// Visible for testing.
@@ -2807,17 +2875,17 @@ public class SharePartition {
}
/**
- * The InitialReadGapOffset class is used to record the gap start and end offset of the probable gaps
+ * The GapWindow class records the start and end offsets of the probable gaps
* of available records which are neither known to Persister nor to SharePartition. Share Partition
* will use this information to determine the next fetch offset and should try to fetch the records
* in the gap.
*/
// Visible for Testing
- static class InitialReadGapOffset {
+ static class GapWindow {
private final long endOffset;
private long gapStartOffset;
- InitialReadGapOffset(long endOffset, long gapStartOffset) {
+ GapWindow(long endOffset, long gapStartOffset) {
this.endOffset = endOffset;
this.gapStartOffset = gapStartOffset;
}
diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala
index 75baa98da15..a3e9eacb66f 100644
--- a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala
+++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala
@@ -115,7 +115,13 @@ object TransactionLog {
val version = buffer.getShort
if (version >= TransactionLogValue.LOWEST_SUPPORTED_VERSION && version <= TransactionLogValue.HIGHEST_SUPPORTED_VERSION) {
val value = new TransactionLogValue(new ByteBufferAccessor(buffer), version)
- val transactionMetadata = new TransactionMetadata(
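+ // Build the set of the transaction's topic partitions up front so TransactionMetadata can be
+ // constructed with them directly instead of mutating it via addPartitions afterwards.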
+ val state = TransactionState.fromId(value.transactionStatus)
+ val tps: util.Set[TopicPartition] = new util.HashSet[TopicPartition]()
+ if (!state.equals(TransactionState.EMPTY))
+ value.transactionPartitions.forEach(partitionsSchema => {
+ partitionsSchema.partitionIds.forEach(partitionId => tps.add(new TopicPartition(partitionsSchema.topic, partitionId.intValue())))
+ })
+ Some(new TransactionMetadata(
transactionalId,
value.producerId,
value.previousProducerId,
@@ -123,20 +129,11 @@ object TransactionLog {
value.producerEpoch,
RecordBatch.NO_PRODUCER_EPOCH,
value.transactionTimeoutMs,
- TransactionState.fromId(value.transactionStatus),
- util.Set.of(),
+ state,
+ tps,
value.transactionStartTimestampMs,
value.transactionLastUpdateTimestampMs,
- TransactionVersion.fromFeatureLevel(value.clientTransactionVersion))
-
- if (!transactionMetadata.state.equals(TransactionState.EMPTY))
- value.transactionPartitions.forEach(partitionsSchema => {
- transactionMetadata.addPartitions(partitionsSchema.partitionIds
- .stream
- .map(partitionId => new TopicPartition(partitionsSchema.topic, partitionId.intValue()))
- .toList)
- })
- Some(transactionMetadata)
+ TransactionVersion.fromFeatureLevel(value.clientTransactionVersion)))
} else throw new IllegalStateException(s"Unknown version $version from the transaction log message value")
}
}
diff --git a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala
index c64218246b3..124a4c7b78f 100755
--- a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala
+++ b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala
@@ -239,7 +239,7 @@ object DynamicBrokerConfig {
}
}
val configHandler = new BrokerConfigHandler(config, quotaManagers)
- configHandler.processConfigChanges("", dynamicPerBrokerConfigs)
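+ // The empty broker id targets the cluster-wide default resource, so apply the dynamic
+ // default configs here; the per-broker overrides are applied in the next call.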
+ configHandler.processConfigChanges("", dynamicDefaultConfigs)
configHandler.processConfigChanges(config.brokerId.toString, dynamicPerBrokerConfigs)
}
}
diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala
index 496e50208db..070b3e544a6 100644
--- a/core/src/main/scala/kafka/server/ReplicaManager.scala
+++ b/core/src/main/scala/kafka/server/ReplicaManager.scala
@@ -172,7 +172,8 @@ object ReplicaManager {
ListOffsetsRequest.LATEST_TIMESTAMP -> 1.toShort,
ListOffsetsRequest.MAX_TIMESTAMP -> 7.toShort,
ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP -> 8.toShort,
- ListOffsetsRequest.LATEST_TIERED_TIMESTAMP -> 9.toShort
+ ListOffsetsRequest.LATEST_TIERED_TIMESTAMP -> 9.toShort,
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP -> 11.toShort
)
def createLogReadResult(highWatermark: Long,
@@ -788,7 +789,11 @@ class ReplicaManager(val config: KafkaConfig,
hasCustomErrorMessage = customException.isDefined
)
}
- val entriesWithoutErrorsPerPartition = entriesPerPartition.filter { case (key, _) => !errorResults.contains(key) }
+ // In non-transaction paths, errorResults is typically empty, so we can
+ // directly use entriesPerPartition instead of creating a new filtered collection
+ val entriesWithoutErrorsPerPartition =
+ if (errorResults.nonEmpty) entriesPerPartition.filter { case (key, _) => !errorResults.contains(key) }
+ else entriesPerPartition
val preAppendPartitionResponses = buildProducePartitionStatus(errorResults).map { case (k, status) => k -> status.responseStatus }
diff --git a/core/src/main/scala/kafka/tools/TestRaftServer.scala b/core/src/main/scala/kafka/tools/TestRaftServer.scala
index a47b9fd4d47..2b3183be80b 100644
--- a/core/src/main/scala/kafka/tools/TestRaftServer.scala
+++ b/core/src/main/scala/kafka/tools/TestRaftServer.scala
@@ -25,7 +25,6 @@ import kafka.network.SocketServer
import kafka.raft.{DefaultExternalKRaftMetrics, KafkaRaftManager, RaftManager}
import kafka.server.{KafkaConfig, KafkaRequestHandlerPool}
import kafka.utils.{CoreUtils, Logging}
-import org.apache.kafka.common.errors.InvalidConfigurationException
import org.apache.kafka.common.message.ApiMessageType.ListenerType
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing
@@ -115,8 +114,8 @@ class TestRaftServer(
workloadGenerator = new RaftWorkloadGenerator(
raftManager,
time,
- recordsPerSec = 20000,
- recordSize = 256
+ recordsPerSec = throughput,
+ recordSize = recordSize
)
val requestHandler = new TestRaftRequestHandler(
@@ -428,7 +427,7 @@ object TestRaftServer extends Logging {
}
private class TestRaftServerOptions(args: Array[String]) extends CommandDefaultOptions(args) {
- val configOpt: OptionSpec[String] = parser.accepts("config", "Required configured file")
+ val configOpt: OptionSpec[String] = parser.accepts("config", "REQUIRED: The configuration file")
.withRequiredArg
.describedAs("filename")
.ofType(classOf[String])
@@ -446,12 +445,14 @@ object TestRaftServer extends Logging {
.ofType(classOf[Int])
.defaultsTo(256)
- val directoryId: OptionSpec[String] = parser.accepts("replica-directory-id", "The directory id of the replica")
+ val directoryId: OptionSpec[String] = parser.accepts("replica-directory-id", "REQUIRED: The directory id of the replica")
.withRequiredArg
.describedAs("directory id")
.ofType(classOf[String])
options = parser.parse(args : _*)
+
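+ // Fail fast with a usage error if the required --config or --replica-directory-id options are missing.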
+ def checkArgs(): Unit = CommandLineUtils.checkRequiredArgs(parser, options, configOpt, directoryId)
}
def main(args: Array[String]): Unit = {
@@ -459,16 +460,11 @@ object TestRaftServer extends Logging {
try {
CommandLineUtils.maybePrintHelpOrVersion(opts,
"Standalone raft server for performance testing")
+ opts.checkArgs()
val configFile = opts.options.valueOf(opts.configOpt)
- if (configFile == null) {
- throw new InvalidConfigurationException("Missing configuration file. Should specify with '--config'")
- }
-
val directoryIdAsString = opts.options.valueOf(opts.directoryId)
- if (directoryIdAsString == null) {
- throw new InvalidConfigurationException("Missing replica directory id. Should specify with --replica-directory-id")
- }
+
val serverProps = Utils.loadProps(configFile)
// KafkaConfig requires either `process.roles` or `zookeeper.connect`. Neither are
diff --git a/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java b/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java
deleted file mode 100644
index 28c12cf6bce..00000000000
--- a/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kafka.server.integration;
-import kafka.integration.KafkaServerTestHarness;
-import kafka.server.KafkaBroker;
-import kafka.server.KafkaConfig;
-import kafka.utils.Logging;
-import kafka.utils.TestUtils;
-
-import org.apache.kafka.clients.CommonClientConfigs;
-import org.apache.kafka.clients.admin.Admin;
-import org.apache.kafka.clients.admin.AlterConfigOp;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.FeatureUpdate;
-import org.apache.kafka.clients.admin.NewTopic;
-import org.apache.kafka.clients.admin.TopicDescription;
-import org.apache.kafka.clients.admin.UpdateFeaturesOptions;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.Node;
-import org.apache.kafka.common.TopicPartitionInfo;
-import org.apache.kafka.common.config.ConfigResource;
-import org.apache.kafka.common.config.TopicConfig;
-import org.apache.kafka.common.security.auth.SecurityProtocol;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.apache.kafka.server.common.EligibleLeaderReplicasVersion;
-import org.apache.kafka.server.common.MetadataVersion;
-import org.apache.kafka.storage.internals.checkpoint.CleanShutdownFileHandler;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInfo;
-
-import java.io.File;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.function.BiFunction;
-import java.util.stream.Collectors;
-
-import scala.collection.JavaConverters;
-import scala.collection.Seq;
-import scala.collection.mutable.HashMap;
-
-import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class EligibleLeaderReplicasIntegrationTest extends KafkaServerTestHarness implements Logging {
- private String bootstrapServer;
- private String testTopicName;
- private Admin adminClient;
-
- @Override
- public MetadataVersion metadataVersion() {
- return MetadataVersion.IBP_4_0_IV1;
- }
-
- @Override
- public Seq generateConfigs() {
- List brokerConfigs = new ArrayList<>();
- brokerConfigs.addAll(scala.collection.JavaConverters.seqAsJavaList(TestUtils.createBrokerConfigs(
- 5, // The tests require 4 brokers to host the partition. However, we need the 5th broker to handle the admin client requests.
- true,
- true,
- scala.Option.empty(),
- scala.Option.empty(),
- scala.Option.empty(),
- true,
- false,
- false,
- false,
- new HashMap<>(),
- 1,
- false,
- 1,
- (short) 4,
- 0,
- false
- )));
- List configs = new ArrayList<>();
- for (Properties props : brokerConfigs) {
- configs.add(KafkaConfig.fromProps(props));
- }
- return JavaConverters.asScalaBuffer(configs).toSeq();
- }
-
- @BeforeEach
- @Override
- public void setUp(TestInfo info) {
- super.setUp(info);
- // create adminClient
- Properties props = new Properties();
- bootstrapServer = bootstrapServers(listenerName());
- props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
- adminClient = Admin.create(props);
- adminClient.updateFeatures(
- Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME,
- new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)),
- new UpdateFeaturesOptions()
- );
- testTopicName = String.format("%s-%s", info.getTestMethod().get().getName(), "ELR-test");
- }
-
- @AfterEach
- public void close() throws Exception {
- if (adminClient != null) adminClient.close();
- }
-
- @Test
- public void testHighWatermarkShouldNotAdvanceIfUnderMinIsr() throws ExecutionException, InterruptedException {
- adminClient.createTopics(
- List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
- TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000);
-
- ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
- Collection ops = new ArrayList<>();
- ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
- Map> configOps = Map.of(configResource, ops);
- // alter configs on target cluster
- adminClient.incrementalAlterConfigs(configOps).all().get();
- Producer producer = null;
- Consumer consumer = null;
- try {
- TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName);
- TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
- List initialReplicas = topicPartitionInfo.replicas();
- assertEquals(4, topicPartitionInfo.isr().size());
- assertEquals(0, topicPartitionInfo.elr().size());
- assertEquals(0, topicPartitionInfo.lastKnownElr().size());
-
- Properties producerProps = new Properties();
- producerProps.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
- producerProps.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
- producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
- // Use Ack=1 for the producer.
- producerProps.put(ProducerConfig.ACKS_CONFIG, "1");
- producer = new KafkaProducer(producerProps);
-
- Properties consumerProps = new Properties();
- consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
- consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
- consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "10");
- consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
- consumerProps.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
- consumerProps.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
- consumer = new KafkaConsumer<>(consumerProps);
- consumer.subscribe(Set.of(testTopicName));
-
- producer.send(new ProducerRecord<>(testTopicName, "0", "0")).get();
- waitUntilOneMessageIsConsumed(consumer);
-
- killBroker(initialReplicas.get(0).id());
- killBroker(initialReplicas.get(1).id());
-
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 2 && elrSize == 1;
- });
-
- // Now the partition is under min ISR. HWM should not advance.
- producer.send(new ProducerRecord<>(testTopicName, "1", "1")).get();
- Thread.sleep(100);
- assertEquals(0, consumer.poll(Duration.ofSeconds(1L)).count());
-
- // Restore the min ISR and the previous log should be visible.
- startBroker(initialReplicas.get(1).id());
- startBroker(initialReplicas.get(0).id());
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 4 && elrSize == 0;
- });
-
- waitUntilOneMessageIsConsumed(consumer);
- } finally {
- restartDeadBrokers(false);
- if (consumer != null) consumer.close();
- if (producer != null) producer.close();
- }
- }
-
- void waitUntilOneMessageIsConsumed(Consumer consumer) {
- TestUtils.waitUntilTrue(
- () -> {
- try {
- ConsumerRecords record = consumer.poll(Duration.ofMillis(100L));
- return record.count() >= 1;
- } catch (Exception e) {
- return false;
- }
- },
- () -> "fail to consume messages",
- DEFAULT_MAX_WAIT_MS, 100L
- );
- }
-
- @Test
- public void testElrMemberCanBeElected() throws ExecutionException, InterruptedException {
- adminClient.createTopics(
- List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
- TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000);
-
- ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
- Collection ops = new ArrayList<>();
- ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
- Map> configOps = Map.of(configResource, ops);
- // alter configs on target cluster
- adminClient.incrementalAlterConfigs(configOps).all().get();
-
- try {
- TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName);
- TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
- List initialReplicas = topicPartitionInfo.replicas();
- assertEquals(4, topicPartitionInfo.isr().size());
- assertEquals(0, topicPartitionInfo.elr().size());
- assertEquals(0, topicPartitionInfo.lastKnownElr().size());
-
- killBroker(initialReplicas.get(0).id());
- killBroker(initialReplicas.get(1).id());
- killBroker(initialReplicas.get(2).id());
-
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 1 && elrSize == 2;
- });
-
- killBroker(initialReplicas.get(3).id());
-
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 0 && elrSize == 3;
- });
-
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- assertEquals(1, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString());
- int expectLastKnownLeader = initialReplicas.get(3).id();
- assertEquals(expectLastKnownLeader, topicPartitionInfo.lastKnownElr().get(0).id(), topicPartitionInfo.toString());
-
- // At this point, all the replicas are failed and the last know leader is No.3 and 3 members in the ELR.
- // Restart one broker of the ELR and it should be the leader.
-
- int expectLeader = topicPartitionInfo.elr().stream()
- .filter(node -> node.id() != expectLastKnownLeader).toList().get(0).id();
-
- startBroker(expectLeader);
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 1 && elrSize == 2;
- });
-
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString());
- assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString());
-
- // Start another 2 brokers and the ELR fields should be cleaned.
- topicPartitionInfo.replicas().stream().filter(node -> node.id() != expectLeader).limit(2)
- .forEach(node -> startBroker(node.id()));
-
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 3 && elrSize == 0;
- });
-
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString());
- assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString());
- } finally {
- restartDeadBrokers(false);
- }
- }
-
- @Test
- public void testElrMemberShouldBeKickOutWhenUncleanShutdown() throws ExecutionException, InterruptedException {
- adminClient.createTopics(
- List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
- TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000);
-
- ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
- Collection ops = new ArrayList<>();
- ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
- Map> configOps = Map.of(configResource, ops);
- // alter configs on target cluster
- adminClient.incrementalAlterConfigs(configOps).all().get();
-
- try {
- TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName);
- TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
- List initialReplicas = topicPartitionInfo.replicas();
- assertEquals(4, topicPartitionInfo.isr().size());
- assertEquals(0, topicPartitionInfo.elr().size());
- assertEquals(0, topicPartitionInfo.lastKnownElr().size());
-
- killBroker(initialReplicas.get(0).id());
- killBroker(initialReplicas.get(1).id());
- killBroker(initialReplicas.get(2).id());
- killBroker(initialReplicas.get(3).id());
-
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 0 && elrSize == 3;
- });
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
-
- int brokerToBeUncleanShutdown = topicPartitionInfo.elr().get(0).id();
- KafkaBroker broker = brokers().find(b -> {
- return b.config().brokerId() == brokerToBeUncleanShutdown;
- }).get();
- Seq dirs = broker.logManager().liveLogDirs();
- assertEquals(1, dirs.size());
- CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.apply(0).toString());
- assertTrue(handler.exists());
- assertDoesNotThrow(() -> handler.delete());
-
- // After remove the clean shutdown file, the broker should report unclean shutdown during restart.
- startBroker(brokerToBeUncleanShutdown);
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 0 && elrSize == 2;
- });
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- assertNull(topicPartitionInfo.leader());
- assertEquals(1, topicPartitionInfo.lastKnownElr().size());
- } finally {
- restartDeadBrokers(false);
- }
- }
-
- /*
- This test is only valid for KIP-966 part 1. When the unclean recovery is implemented, it should be removed.
- */
- @Test
- public void testLastKnownLeaderShouldBeElectedIfEmptyElr() throws ExecutionException, InterruptedException {
- adminClient.createTopics(
- List.of(new NewTopic(testTopicName, 1, (short) 4))).all().get();
- TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000);
-
- ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName);
- Collection ops = new ArrayList<>();
- ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET));
- Map> configOps = Map.of(configResource, ops);
- // alter configs on target cluster
- adminClient.incrementalAlterConfigs(configOps).all().get();
-
- try {
- TopicDescription testTopicDescription = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName);
- TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0);
- List initialReplicas = topicPartitionInfo.replicas();
- assertEquals(4, topicPartitionInfo.isr().size());
- assertEquals(0, topicPartitionInfo.elr().size());
- assertEquals(0, topicPartitionInfo.lastKnownElr().size());
-
- killBroker(initialReplicas.get(0).id());
- killBroker(initialReplicas.get(1).id());
- killBroker(initialReplicas.get(2).id());
- killBroker(initialReplicas.get(3).id());
-
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 0 && elrSize == 3;
- });
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- int lastKnownLeader = topicPartitionInfo.lastKnownElr().get(0).id();
-
- Set initialReplicaSet = initialReplicas.stream().map(node -> node.id()).collect(Collectors.toSet());
- brokers().foreach(broker -> {
- if (initialReplicaSet.contains(broker.config().brokerId())) {
- Seq dirs = broker.logManager().liveLogDirs();
- assertEquals(1, dirs.size());
- CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.apply(0).toString());
- assertDoesNotThrow(() -> handler.delete());
- }
- return true;
- });
-
-
- // After remove the clean shutdown file, the broker should report unclean shutdown during restart.
- topicPartitionInfo.replicas().forEach(replica -> {
- if (replica.id() != lastKnownLeader) startBroker(replica.id());
- });
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize == 0 && elrSize == 1;
- });
- topicPartitionInfo = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- assertNull(topicPartitionInfo.leader());
- assertEquals(1, topicPartitionInfo.lastKnownElr().size());
-
- // Now if the last known leader goes through unclean shutdown, it will still be elected.
- startBroker(lastKnownLeader);
- waitForIsrAndElr((isrSize, elrSize) -> {
- return isrSize > 0 && elrSize == 0;
- });
-
- TestUtils.waitUntilTrue(
- () -> {
- try {
- TopicPartitionInfo partition = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName).partitions().get(0);
- if (partition.leader() == null) return false;
- return partition.lastKnownElr().isEmpty() && partition.elr().isEmpty() && partition.leader().id() == lastKnownLeader;
- } catch (Exception e) {
- return false;
- }
- },
- () -> String.format("Partition metadata for %s is not correct", testTopicName),
- DEFAULT_MAX_WAIT_MS, 100L
- );
- } finally {
- restartDeadBrokers(false);
- }
- }
-
- void waitForIsrAndElr(BiFunction isIsrAndElrSizeSatisfied) {
- TestUtils.waitUntilTrue(
- () -> {
- try {
- TopicDescription topicDescription = adminClient.describeTopics(List.of(testTopicName))
- .allTopicNames().get().get(testTopicName);
- TopicPartitionInfo partition = topicDescription.partitions().get(0);
- return isIsrAndElrSizeSatisfied.apply(partition.isr().size(), partition.elr().size());
- } catch (Exception e) {
- return false;
- }
- },
- () -> String.format("Partition metadata for %s is not propagated", testTopicName),
- DEFAULT_MAX_WAIT_MS, 100L);
- }
-}
diff --git a/core/src/test/java/kafka/server/share/SharePartitionTest.java b/core/src/test/java/kafka/server/share/SharePartitionTest.java
index 30b49e17b16..47e214a716f 100644
--- a/core/src/test/java/kafka/server/share/SharePartitionTest.java
+++ b/core/src/test/java/kafka/server/share/SharePartitionTest.java
@@ -17,6 +17,7 @@
package kafka.server.share;
import kafka.server.ReplicaManager;
+import kafka.server.share.SharePartition.GapWindow;
import kafka.server.share.SharePartition.SharePartitionState;
import kafka.server.share.SharePartitionManager.SharePartitionListener;
@@ -965,11 +966,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.cachedState().get(21L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(21L).offsetState());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(10, initialReadGapOffset.gapStartOffset());
- assertEquals(30, initialReadGapOffset.endOffset());
+ assertEquals(10, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(30, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -1010,11 +1011,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(30L).offsetState());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(10, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(10, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -1051,11 +1052,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(30L).offsetState());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(21, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -1082,10 +1083,676 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(31, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
- // Since there are no gaps present in the readState response, initialReadGapOffset should be null
- assertNull(initialReadGapOffset);
+ // Since there are no gaps present in the readState response, persisterReadResultGapWindow should be null
+ assertNull(persisterReadResultGapWindow);
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquire() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
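+ // Mock a persister read result with batches 15-18 (available), 20-22 (archived) and
+ // 26-30 (available), leaving read gaps at offsets 10-14, 19 and 23-25.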
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(10, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create a single record batch that covers the entire initial read gap range from 10 to 30.
+ // The records in the batch are from 10 to 49.
+ MemoryRecords records = memoryRecords(40, 10);
+ // Set max fetch records to 1; records will be acquired only till the end of the first gap.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 1,
+ 10,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 5);
+
+ assertArrayEquals(expectedAcquiredRecord(10, 14, 1).toArray(), acquiredRecordsList.toArray());
+ assertEquals(15, sharePartition.nextFetchOffset());
+ assertEquals(4, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
+ assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(10L).offsetState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(15L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+
+ // Send the same batch again to acquire the next set of records.
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 10,
+ 15,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 13);
+
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 4));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(31, sharePartition.nextFetchOffset());
+ assertEquals(6, sharePartition.cachedState().size());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(1, sharePartition.cachedState().get(19L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(19L).offsetState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
+ assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
+ assertEquals(1, sharePartition.cachedState().get(23L).batchDeliveryCount());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(30L, sharePartition.endOffset());
+ // As all the gaps are now filled, the persisterReadResultGapWindow should be null.
+ assertNull(sharePartition.persisterReadResultGapWindow());
+
+ // Now the initial read gap is filled, so the complete batch can be acquired despite max fetch records being 1.
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 1,
+ 31,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 19);
+
+ assertArrayEquals(expectedAcquiredRecord(31, 49, 1).toArray(), acquiredRecordsList.toArray());
+ assertEquals(50, sharePartition.nextFetchOffset());
+ assertEquals(7, sharePartition.cachedState().size());
+ assertEquals(31, sharePartition.cachedState().get(31L).firstOffset());
+ assertEquals(49, sharePartition.cachedState().get(31L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState());
+ assertEquals(1, sharePartition.cachedState().get(31L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(31L).offsetState());
+ assertEquals(49L, sharePartition.endOffset());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithHigherMaxFetchRecords() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(10, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create a single record batch that covers the entire initial read gap range from 10 to 30.
+ // The records in the batch are from 10 to 49.
+ MemoryRecords records = memoryRecords(40, 10);
+ // Set max fetch records to 500, all records should be acquired.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 500,
+ 10,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 37);
+
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 4));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(50, sharePartition.nextFetchOffset());
+ assertEquals(7, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+ assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
+ assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
+ assertEquals(31, sharePartition.cachedState().get(31L).firstOffset());
+ assertEquals(49, sharePartition.cachedState().get(31L).lastOffset());
+
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState());
+ assertEquals(49L, sharePartition.endOffset());
+ // As all the gaps are now filled, the persisterReadResultGapWindow should be null.
+ assertNull(sharePartition.persisterReadResultGapWindow());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithFetchBatchLastOffsetWithinCachedBatch() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(10, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create a single record batch that ends within a cached batch and whose records start
+ // past the startOffset.
+ MemoryRecords records = memoryRecords(16, 12);
+ // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 500,
+ 10,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 13);
+
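+ // The fetched batch spans offsets 12-27: the gaps 12-14, 19 and 23-25, the cached AVAILABLE batch 15-18,
+ // and offsets 26-27 of the cached batch 26-30 are acquired, while the ARCHIVED batch 20-22 is skipped,
+ // giving 13 acquired records.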
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(12, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 4));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(28, sharePartition.nextFetchOffset());
+ assertEquals(6, sharePartition.cachedState().size());
+ assertEquals(12, sharePartition.cachedState().get(12L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(12L).lastOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+ assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
+ assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
+
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(12L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
+ assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithFetchBatchPriorStartOffset() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(10, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create a single record batch whose first offset is prior to the startOffset.
+ MemoryRecords records = memoryRecords(16, 6);
+ // Set max fetch records to 500; records should be acquired up to the last offset of the fetched batch.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 500,
+ 10,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 10);
+
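+ // The fetched batch spans offsets 6-21 and its base offset is adjusted to the startOffset 10; offsets
+ // 10-14, 15-18 and 19 are acquired while the ARCHIVED batch 20-22 is skipped, giving 10 acquired records.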
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(23, sharePartition.nextFetchOffset());
+ assertEquals(5, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithMultipleBatches() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(5, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(5, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create multiple record batches that cover the entire initial read gap window from 5 to 30.
+ // The fetched records span offsets 5 to 49.
+ ByteBuffer buffer = ByteBuffer.allocate(4096);
+ memoryRecordsBuilder(buffer, 2, 5).close();
+ memoryRecordsBuilder(buffer, 1, 8).close();
+ memoryRecordsBuilder(buffer, 2, 10).close();
+ memoryRecordsBuilder(buffer, 6, 13).close();
+ memoryRecordsBuilder(buffer, 3, 19).close();
+ memoryRecordsBuilder(buffer, 9, 22).close();
+ memoryRecordsBuilder(buffer, 19, 31).close();
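+ // The batches above cover offsets 5-6, 8, 10-11, 13-18, 19-21, 22-30 and 31-49, leaving gaps at
+ // offsets 7, 9 and 12.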
+ buffer.flip();
+ MemoryRecords records = MemoryRecords.readableRecords(buffer);
+ // Set max fetch records to 1; records will be acquired until the first gap is encountered.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 1,
+ 5L,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 2);
+
+ assertArrayEquals(expectedAcquiredRecord(5, 6, 1).toArray(), acquiredRecordsList.toArray());
+ assertEquals(7, sharePartition.nextFetchOffset());
+ assertEquals(4, sharePartition.cachedState().size());
+ assertEquals(5, sharePartition.cachedState().get(5L).firstOffset());
+ assertEquals(6, sharePartition.cachedState().get(5L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState());
+ assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(5L).offsetState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(7L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+
+ // Remove the first batch from the records as the fetch offset has moved forward to offset 7.
+ List<MutableRecordBatch> batch = TestUtils.toList(records.batches());
+ records = records.slice(batch.get(0).sizeInBytes(), records.sizeInBytes() - batch.get(0).sizeInBytes());
+ // Send the batch again to acquire the next set of records.
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 3,
+ 7L,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 4);
+
+ assertArrayEquals(expectedAcquiredRecord(8, 11, 1).toArray(), acquiredRecordsList.toArray());
+ assertEquals(12, sharePartition.nextFetchOffset());
+ assertEquals(5, sharePartition.cachedState().size());
+ assertEquals(8, sharePartition.cachedState().get(8L).firstOffset());
+ assertEquals(11, sharePartition.cachedState().get(8L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(8L).batchState());
+ assertEquals(1, sharePartition.cachedState().get(8L).batchDeliveryCount());
+ assertNull(sharePartition.cachedState().get(8L).offsetState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(12L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+
+ // Remove the next 2 batches from the records as the fetch offset has moved forward to offset 12.
+ int size = batch.get(1).sizeInBytes() + batch.get(2).sizeInBytes();
+ records = records.slice(size, records.sizeInBytes() - size);
+ // Send the records with 8 as max fetch records to acquire new and existing cached batches.
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 8,
+ 12,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 10);
+
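+ // Though max fetch records is 8, 10 records are acquired as acquisition completes on batch boundaries:
+ // 13-14, 15-18, 19 and 23-25 are acquired while the ARCHIVED batch 20-22 is skipped.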
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(13, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(26, sharePartition.nextFetchOffset());
+ assertEquals(8, sharePartition.cachedState().size());
+ assertEquals(13, sharePartition.cachedState().get(13L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(13L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(13L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
+ assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(26L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+
+ // Remove the next 2 batches from the records as the fetch offset has moved forward to offset 26.
+ // Do not remove the batch at index 5 (offsets 22-30) as it's only partially acquired.
+ size = batch.get(3).sizeInBytes() + batch.get(4).sizeInBytes();
+ records = records.slice(size, records.sizeInBytes() - size);
+ // Send the records with max fetch records as 10 to acquire the remaining cached batch and the
+ // rest of the fetched data.
+ acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 10,
+ 26,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 24);
+
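+ // The remaining cached batch 26-30 and the newly fetched batch 31-49 are acquired, which moves the
+ // endOffset to 49 and closes the gap window.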
+ expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(26, 30, 4));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(50, sharePartition.nextFetchOffset());
+ assertEquals(9, sharePartition.cachedState().size());
+ assertEquals(31, sharePartition.cachedState().get(31L).firstOffset());
+ assertEquals(49, sharePartition.cachedState().get(31L).lastOffset());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(49L, sharePartition.endOffset());
+ // As all the gaps are now filled, the persisterReadResultGapWindow should be null.
+ assertNull(sharePartition.persisterReadResultGapWindow());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithMultipleBatchesAndLastOffsetWithinCachedBatch() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(5, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(5, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create multiple record batches whose last offset ends within a cached batch and whose base
+ // offset is after the startOffset.
+ ByteBuffer buffer = ByteBuffer.allocate(4096);
+ memoryRecordsBuilder(buffer, 2, 7).close();
+ memoryRecordsBuilder(buffer, 2, 10).close();
+ memoryRecordsBuilder(buffer, 6, 13).close();
+ // Though offset 19 is a gap in the fetched data, it will still be acquired.
+ memoryRecordsBuilder(buffer, 8, 20).close();
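+ // The batches above cover offsets 7-8, 10-11, 13-18 and 20-27; offsets 9, 12 and 19 are gaps within
+ // the fetched range.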
+ buffer.flip();
+ MemoryRecords records = MemoryRecords.readableRecords(buffer);
+ // Set max fetch records to 500; records should be acquired up to the last offset of the fetched batch.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 500,
+ 5,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 18);
+
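+ // Offsets 7-19 (including the gaps at 9, 12 and 19), the gap 23-25 and offsets 26-27 of the cached
+ // batch 26-30 are acquired; the ARCHIVED batch 20-22 is skipped, giving 18 acquired records.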
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(7, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 4));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(28, sharePartition.nextFetchOffset());
+ assertEquals(6, sharePartition.cachedState().size());
+ assertEquals(7, sharePartition.cachedState().get(7L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(7L).lastOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+ assertEquals(23, sharePartition.cachedState().get(23L).firstOffset());
+ assertEquals(25, sharePartition.cachedState().get(23L).lastOffset());
+
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState());
+ assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ }
+
+ @Test
+ public void testMaybeInitializeAndAcquireWithMultipleBatchesPriorStartOffset() {
+ Persister persister = Mockito.mock(Persister.class);
+ ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
+ Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of(
+ new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of(
+ PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(),
+ List.of(
+ new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2),
+ new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2),
+ new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3)))))));
+ Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
+ SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
+
+ CompletableFuture<Void> result = sharePartition.maybeInitialize();
+ assertTrue(result.isDone());
+ assertFalse(result.isCompletedExceptionally());
+
+ assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState());
+ assertEquals(3, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.startOffset());
+ assertEquals(30, sharePartition.endOffset());
+ assertEquals(10, sharePartition.nextFetchOffset());
+
+ assertEquals(18, sharePartition.cachedState().get(15L).lastOffset());
+ assertEquals(22, sharePartition.cachedState().get(20L).lastOffset());
+ assertEquals(30, sharePartition.cachedState().get(26L).lastOffset());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
+ assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset());
+
+ // Create multiple record batches where several batches' base offsets are prior to the startOffset.
+ ByteBuffer buffer = ByteBuffer.allocate(4096);
+ memoryRecordsBuilder(buffer, 2, 3).close();
+ memoryRecordsBuilder(buffer, 1, 6).close();
+ memoryRecordsBuilder(buffer, 4, 8).close();
+ memoryRecordsBuilder(buffer, 10, 13).close();
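+ // The batches above cover offsets 3-4, 6, 8-11 and 13-22; the batches starting at offsets 3, 6 and 8
+ // begin prior to the startOffset 10.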
+ buffer.flip();
+ MemoryRecords records = MemoryRecords.readableRecords(buffer);
+ // Set max fetch records to 500; records should be acquired up to the last offset of the fetched batch.
+ List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire(
+ MEMBER_ID,
+ BATCH_SIZE,
+ 500,
+ 10,
+ fetchPartitionData(records),
+ FETCH_ISOLATION_HWM),
+ 10);
+
+ List<AcquiredRecords> expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3));
+ expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1));
+
+ assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray());
+ assertEquals(23, sharePartition.nextFetchOffset());
+ assertEquals(5, sharePartition.cachedState().size());
+ assertEquals(10, sharePartition.cachedState().get(10L).firstOffset());
+ assertEquals(14, sharePartition.cachedState().get(10L).lastOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).firstOffset());
+ assertEquals(19, sharePartition.cachedState().get(19L).lastOffset());
+
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState());
+ assertEquals(30L, sharePartition.endOffset());
+ assertNotNull(sharePartition.persisterReadResultGapWindow());
+ assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset());
}
@Test
@@ -2368,12 +3035,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(16, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(16, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(16, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2407,12 +3074,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(41, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(21, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2460,12 +3127,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(26, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(26, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(26, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2504,12 +3171,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(26, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(26, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(26, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2561,12 +3228,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(86, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(86, initialReadGapOffset.gapStartOffset());
- assertEquals(90, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(86, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(90, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2605,12 +3272,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(31, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(31, initialReadGapOffset.gapStartOffset());
- assertEquals(70, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(31, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(70, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2656,12 +3323,12 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(76, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // After records are acquired, the initialReadGapOffset should be updated
- assertEquals(76, initialReadGapOffset.gapStartOffset());
- assertEquals(90, initialReadGapOffset.endOffset());
+ // After records are acquired, the persisterReadResultGapWindow should be updated
+ assertEquals(76, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(90, persisterReadResultGapWindow.endOffset());
}
@@ -2709,11 +3376,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(27, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(27, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(27, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2758,11 +3425,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(21, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(21, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2807,11 +3474,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(21, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(21, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -2859,8 +3526,8 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(51, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNull(persisterReadResultGapWindow);
}
@Test
@@ -2903,8 +3570,8 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(61, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNull(persisterReadResultGapWindow);
}
@Test
@@ -2949,8 +3616,8 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(61, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNull(persisterReadResultGapWindow);
}
@Test
@@ -2998,8 +3665,8 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(61, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNull(persisterReadResultGapWindow);
}
@Test
@@ -3039,11 +3706,11 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(41, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(31, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(31, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
// Fetching from the nextFetchOffset so that endOffset moves ahead
records = memoryRecords(15, 41);
@@ -3059,9 +3726,9 @@ public class SharePartitionTest {
assertEquals(3, sharePartition.stateEpoch());
assertEquals(56, sharePartition.nextFetchOffset());
- // Since the endOffset is now moved ahead, the initialReadGapOffset should be empty
- initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNull(initialReadGapOffset);
+ // Since the endOffset has now moved ahead, the persisterReadResultGapWindow should be null
+ persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNull(persisterReadResultGapWindow);
}
@Test
@@ -4493,6 +5160,122 @@ public class SharePartitionTest {
assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask());
}
+ @Test
+ public void testLsoMovementForArchivingAllAvailableBatches() {
+ SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
+
+ // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50.
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10);
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 21), 10);
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 31), 10);
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 41), 10);
+
+ // After the acknowledgements, the state of the share partition will be:
+ // 1. 11 -> 20: AVAILABLE
+ // 2. 21 -> 30: ACQUIRED
+ // 3. 31 -> 40: AVAILABLE
+ // 4. 41 -> 50: ACQUIRED
+ sharePartition.acknowledge(MEMBER_ID, List.of(
+ new ShareAcknowledgementBatch(11, 20, List.of((byte) 2)),
+ new ShareAcknowledgementBatch(31, 40, List.of((byte) 2))
+ ));
+
+ // Move the LSO to 41. When the LSO moves ahead, all batches that are AVAILABLE before the new LSO will be ARCHIVED.
+ // Thus, the state of the share partition will be:
+ // 1. 11 -> 20: ARCHIVED
+ // 2. 21 -> 30: ACQUIRED
+ // 3. 31 -> 40: ARCHIVED
+ // 4. 41 -> 50: ACQUIRED
+ // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal
+ // state when the corresponding acquisition lock timer task expires.
+ sharePartition.updateCacheAndOffsets(41);
+
+ assertEquals(51, sharePartition.nextFetchOffset());
+ assertEquals(41, sharePartition.startOffset());
+ assertEquals(50, sharePartition.endOffset());
+
+ assertEquals(4, sharePartition.cachedState().size());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState());
+
+ // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these
+ // records will remain in the ACQUIRED state.
+ sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of((byte) 2))));
+
+ // The batch is still in ACQUIRED state.
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
+
+ // Once the acquisition lock timer task for the batch 21 -> 30 expires, these records will directly be
+ // ARCHIVED.
+ sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run();
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState());
+ }
+
+ @Test
+ public void testLsoMovementForArchivingAllAvailableOffsets() {
+ SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
+
+ // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50.
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10);
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 21), 10);
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 31), 10);
+ fetchAcquiredRecords(sharePartition, memoryRecords(10, 41), 10);
+
+ // After the acknowledgements, the share partition state will be:
+ // 1. 11 -> 20: AVAILABLE
+ // 2. 21 -> 30: ACQUIRED
+ // 3. 31 -> 40: AVAILABLE
+ // 4. 41 -> 50: ACQUIRED
+ sharePartition.acknowledge(MEMBER_ID, List.of(
+ new ShareAcknowledgementBatch(11, 20, List.of((byte) 2)),
+ new ShareAcknowledgementBatch(31, 40, List.of((byte) 2))
+ ));
+
+ // Move the LSO to 36. When the LSO moves ahead, all records that are AVAILABLE before the new LSO will be ARCHIVED.
+ // Thus, the state of the share partition will be:
+ // 1. 11 -> 20: ARCHIVED
+ // 2. 21 -> 30: ACQUIRED
+ // 3. 31 -> 35: ARCHIVED
+ // 4. 36 -> 40: AVAILABLE
+ // 5. 41 -> 50: ACQUIRED
+ // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal
+ // state when the corresponding acquisition lock timer task expires.
+ sharePartition.updateCacheAndOffsets(36);
+
+ assertEquals(36, sharePartition.nextFetchOffset());
+ assertEquals(36, sharePartition.startOffset());
+ assertEquals(50, sharePartition.endOffset());
+
+ assertEquals(4, sharePartition.cachedState().size());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(31L).state());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(32L).state());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(33L).state());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(34L).state());
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(35L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(36L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(37L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(38L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(39L).state());
+ assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(40L).state());
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState());
+
+ // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these
+ // records will remain in the ACQUIRED state.
+ sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of((byte) 2))));
+
+ // The batch is still in ACQUIRED state.
+ assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState());
+
+ // Once the acquisition lock timer task for the batch 21 -> 30 expires, these records will directly be
+ // ARCHIVED.
+ sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run();
+ assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState());
+ }
+
@Test
public void testLsoMovementForArchivingOffsets() {
SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build();
@@ -6000,11 +6783,11 @@ public class SharePartitionTest {
assertEquals(40, sharePartition.endOffset());
assertEquals(21, sharePartition.nextFetchOffset());
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- assertEquals(21, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ assertEquals(21, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
}
@Test
@@ -6574,16 +7357,16 @@ public class SharePartitionTest {
sharePartition.maybeInitialize();
- SharePartition.InitialReadGapOffset initialReadGapOffset = sharePartition.initialReadGapOffset();
- assertNotNull(initialReadGapOffset);
+ GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow();
+ assertNotNull(persisterReadResultGapWindow);
- // Since there is a gap in the beginning, the initialReadGapOffset window is same as the cachedState
- assertEquals(11, initialReadGapOffset.gapStartOffset());
- assertEquals(40, initialReadGapOffset.endOffset());
+ // Since there is a gap in the beginning, the persisterReadResultGapWindow is the same as the cachedState
+ assertEquals(11, persisterReadResultGapWindow.gapStartOffset());
+ assertEquals(40, persisterReadResultGapWindow.endOffset());
long lastOffsetAcknowledged = sharePartition.findLastOffsetAcknowledged();
- // Since the initialReadGapOffset window begins at startOffset, we cannot count any of the offsets as acknowledged.
+ // Since the persisterReadResultGapWindow begins at startOffset, we cannot count any of the offsets as acknowledged.
// Thus, lastOffsetAcknowledged should be -1
assertEquals(-1, lastOffsetAcknowledged);
}
diff --git a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala
index 75447e04df3..170ee3679f4 100644
--- a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala
+++ b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala
@@ -1100,9 +1100,13 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
@ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames)
@MethodSource(Array("getTestGroupProtocolParametersAll"))
def testServersCanStartWithInvalidStaticConfigsAndValidDynamicConfigs(groupProtocol: String): Unit = {
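+ // Clear any reporters registered by earlier tests so the waits below only observe reporters created
+ // in this test.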
+ TestNumReplicaFetcherMetricsReporter.testReporters.clear()
+
// modify snapshot interval config to explicitly take snapshot on a broker with valid dynamic configs
val props = defaultStaticConfig(numServers)
props.put(MetadataLogConfig.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, "10000")
+ props.put(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, classOf[TestNumReplicaFetcherMetricsReporter].getName)
+ props.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "1")
val kafkaConfig = KafkaConfig.fromProps(props)
val newBroker = createBroker(kafkaConfig).asInstanceOf[BrokerServer]
@@ -1110,6 +1114,15 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
alterSslKeystoreUsingConfigCommand(sslProperties1, listenerPrefix(SecureExternal))
+ // Add num.replica.fetchers to the cluster-level config.
+ val clusterLevelProps = new Properties
+ clusterLevelProps.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2")
+ reconfigureServers(clusterLevelProps, perBrokerConfig = false, (ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2"))
+
+ // Wait for the metrics reporter to be configured
+ val initialReporter = TestNumReplicaFetcherMetricsReporter.waitForReporters(1).head
+ initialReporter.verifyState(reconfigureCount = 1, numFetcher = 2)
+
TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer)
TestUtils.waitUntilTrue(
@@ -1122,11 +1135,19 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup
newBroker.shutdown()
newBroker.awaitShutdown()
+ // Clean up the test reporter
+ TestNumReplicaFetcherMetricsReporter.testReporters.clear()
+
val invalidStaticConfigs = defaultStaticConfig(newBroker.config.brokerId)
invalidStaticConfigs.putAll(securityProps(invalidSslConfigs, KEYSTORE_PROPS, listenerPrefix(SecureExternal)))
newBroker.config.updateCurrentConfig(KafkaConfig.fromProps(invalidStaticConfigs))
newBroker.startup()
+
+ // Verify that the custom MetricsReporter is not reconfigured after restart.
+ // If readDynamicBrokerConfigsFromSnapshot works correctly, the reporter should maintain its state.
+ val reporterAfterRestart = TestNumReplicaFetcherMetricsReporter.waitForReporters(1).head
+ reporterAfterRestart.verifyState(reconfigureCount = 0, numFetcher = 2)
}
private def awaitInitialPositions(consumer: Consumer[_, _]): Unit = {
@@ -1635,6 +1656,64 @@ class TestMetricsReporter extends MetricsReporter with Reconfigurable with Close
}
}
+object TestNumReplicaFetcherMetricsReporter {
+ val testReporters = new ConcurrentLinkedQueue[TestNumReplicaFetcherMetricsReporter]()
+
+ def waitForReporters(count: Int): List[TestNumReplicaFetcherMetricsReporter] = {
+ TestUtils.waitUntilTrue(() => testReporters.size == count, msg = "Metrics reporters size not matched. Expected: " + count + ", actual: " + testReporters.size())
+
+ val reporters = testReporters.asScala.toList
+ TestUtils.waitUntilTrue(() => reporters.forall(_.configureCount == 1), msg = "Metrics reporters not configured")
+ reporters
+ }
+}
+
+
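+ // Test MetricsReporter that records how many times it is configured and reconfigured, along with the
+ // latest num.replica.fetchers value it received, so tests can verify dynamic reconfiguration of the
+ // cluster-level config.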
+class TestNumReplicaFetcherMetricsReporter extends MetricsReporter {
+ import TestNumReplicaFetcherMetricsReporter._
+ @volatile var configureCount = 0
+ @volatile var reconfigureCount = 0
+ @volatile var numFetchers: Int = 1
+ testReporters.add(this)
+
+ override def init(metrics: util.List[KafkaMetric]): Unit = {
+ }
+
+ override def configure(configs: util.Map[String, _]): Unit = {
+ configureCount += 1
+ numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt
+ }
+
+ override def metricChange(metric: KafkaMetric): Unit = {
+ }
+
+ override def metricRemoval(metric: KafkaMetric): Unit = {
+ }
+
+ override def reconfigurableConfigs(): util.Set[String] = {
+ util.Set.of(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG)
+ }
+
+ override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
+ val numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt
+ if (numFetchers <= 0)
+ throw new ConfigException(s"Invalid num.replica.fetchers $numFetchers")
+ }
+
+ override def reconfigure(configs: util.Map[String, _]): Unit = {
+ reconfigureCount += 1
+ numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt
+ }
+
+ override def close(): Unit = {
+ }
+
+ def verifyState(reconfigureCount: Int, numFetcher: Int = 1): Unit = {
+ assertEquals(reconfigureCount, this.reconfigureCount)
+ assertEquals(numFetcher, this.numFetchers)
+ }
+}
+
class MockFileConfigProvider extends FileConfigProvider {
@throws(classOf[IOException])
diff --git a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
index d30d5a1040e..da54113ae5c 100755
--- a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
+++ b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
@@ -41,7 +41,7 @@ import org.apache.kafka.server.storage.log.{FetchIsolation, UnexpectedAppendOffs
import org.apache.kafka.server.util.{KafkaScheduler, MockTime, Scheduler}
import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpointFile, PartitionMetadataFile}
import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache
-import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, Cleaner, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, UnifiedLog, VerificationGuard}
+import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, AsyncOffsetReader, Cleaner, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, UnifiedLog, VerificationGuard}
import org.apache.kafka.storage.internals.utils.Throttler
import org.apache.kafka.storage.log.metrics.{BrokerTopicMetrics, BrokerTopicStats}
import org.junit.jupiter.api.Assertions._
@@ -2416,6 +2416,193 @@ class UnifiedLogTest {
KafkaConfig.fromProps(props)
}
+ @Test
+ def testFetchEarliestPendingUploadTimestampNoRemoteStorage(): Unit = {
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1)
+ val log = createLog(logDir, logConfig)
+
+ // Test initial state before any records
+ assertFetchOffsetBySpecialTimestamp(log, None, new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+
+ // Append records
+ val _ = prepareLogWithSequentialRecords(log, recordCount = 2)
+
+ // Test state after records are appended
+ assertFetchOffsetBySpecialTimestamp(log, None, new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+ }
+
+ @Test
+ def testFetchEarliestPendingUploadTimestampWithRemoteStorage(): Unit = {
+ val logStartOffset = 0
+ val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset)
+
+ val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch)
+ val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch)
+ val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch)
+
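+ // Only the first timestamp is resolvable from remote storage (at offset 0); lookups for later
+ // timestamps return empty.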
+ doAnswer(ans => {
+ val timestamp = ans.getArgument(1).asInstanceOf[Long]
+ Optional.of(timestamp)
+ .filter(_ == timestampAndEpochs.head.timestamp)
+ .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(timestampAndEpochs.head.leaderEpoch)))
+ }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition),
+ anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache))
+
+ // Offset 0 (first timestamp) is in remote storage and deleted locally. Offset 1 (second timestamp) is in local storage.
+ log.updateLocalLogStartOffset(1)
+ log.updateHighestOffsetInRemoteStorage(0)
+
+ // In the assertions below we test that offset 0 (first timestamp) is only in remote and offset 1 (second timestamp) is in local storage.
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp)
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)),
+ ListOffsetsRequest.LATEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+ }
+
+ @Test
+ def testFetchEarliestPendingUploadTimestampWithRemoteStorageNoLocalDeletion(): Unit = {
+ val logStartOffset = 0
+ val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset)
+
+ val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch)
+ val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch)
+ val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch)
+
+ // Offsets up to 1 are in remote storage
+ doAnswer(ans => {
+ val timestamp = ans.getArgument(1).asInstanceOf[Long]
+ Optional.of(
+ timestamp match {
+ case x if x == firstTimestamp => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch))
+ case x if x == secondTimestamp => new TimestampAndOffset(x, 1L, Optional.of(secondLeaderEpoch))
+ case _ => null
+ }
+ )
+ }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition),
+ anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache))
+
+ // Offsets 0, 1 (first and second timestamps) are in remote storage and not deleted locally.
+ log.updateLocalLogStartOffset(0)
+ log.updateHighestOffsetInRemoteStorage(1)
+
+ // In the assertions below we test that offset 0 (first timestamp) and offset 1 (second timestamp) are on both remote and local storage
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp)
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)),
+ ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)),
+ ListOffsetsRequest.LATEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(thirdLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+ }
+
+ @Test
+ def testFetchEarliestPendingUploadTimestampNoSegmentsUploaded(): Unit = {
+ val logStartOffset = 0
+ val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset)
+
+ val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch)
+ val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch)
+ val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch)
+
+ // No offsets are in remote storage
+ doAnswer(_ => Optional.empty[TimestampAndOffset]())
+ .when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition),
+ anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache))
+
+ // Offsets 0, 1, 2 (first, second and third timestamps) are in local storage only and not uploaded to remote storage.
+ log.updateLocalLogStartOffset(0)
+ log.updateHighestOffsetInRemoteStorage(-1)
+
+ // In the assertions below we test that offset 0 (first timestamp), offset 1 (second timestamp) and offset 2 (third timestamp) are only in local storage.
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp)
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1L, Optional.of(-1)),
+ ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)),
+ ListOffsetsRequest.LATEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+ }
+
+ @Test
+ def testFetchEarliestPendingUploadTimestampStaleHighestOffsetInRemote(): Unit = {
+ val logStartOffset = 100
+ val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset)
+
+ val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch)
+ val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch)
+ val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch)
+
+ // Offsets 100, 101, 102 (first, second and third timestamps) are in local storage and not uploaded to remote storage.
+ // Tiered storage copy was disabled and then re-enabled; in the meantime the remote log segments were deleted,
+ // so the highest offset recorded for remote storage is now stale.
+ doAnswer(_ => Optional.empty[TimestampAndOffset]())
+ .when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition),
+ anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache))
+
+ log.updateLocalLogStartOffset(100)
+ log.updateHighestOffsetInRemoteStorage(50)
+
+ // In the assertions below we test that offset 100 (first timestamp), offset 101 (second timestamp) and offset 102 (third timestamp) are only in local storage.
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 100L, Optional.of(firstLeaderEpoch))), firstTimestamp)
+ assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 101L, Optional.of(secondLeaderEpoch))), secondTimestamp)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 50L, Optional.empty()),
+ ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 103L, Optional.of(thirdLeaderEpoch)),
+ ListOffsetsRequest.LATEST_TIMESTAMP)
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+ }
+
+ private def prepare(logStartOffset: Int): (RemoteLogManager, UnifiedLog, Seq[TimestampAndEpoch]) = {
+ val config: KafkaConfig = createKafkaConfigWithRLM
+ val purgatory = new DelayedOperationPurgatory[DelayedRemoteListOffsets]("RemoteListOffsets", config.brokerId)
+ val remoteLogManager = spy(new RemoteLogManager(config.remoteLogManagerConfig,
+ 0,
+ logDir.getAbsolutePath,
+ "clusterId",
+ mockTime,
+ _ => Optional.empty[UnifiedLog](),
+ (_, _) => {},
+ brokerTopicStats,
+ new Metrics(),
+ Optional.empty))
+ remoteLogManager.setDelayedOperationPurgatory(purgatory)
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1, remoteLogStorageEnable = true)
+ val log = createLog(logDir, logConfig, logStartOffset = logStartOffset, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager))
+
+ // Verify the earliest pending upload offset for an empty log
+ assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, logStartOffset, Optional.empty()),
+ ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP)
+
+ val timestampAndEpochs = prepareLogWithSequentialRecords(log, recordCount = 3)
+ (remoteLogManager, log, timestampAndEpochs)
+ }
+
/**
* Test the Log truncate operations
*/
@@ -4786,6 +4973,44 @@ class UnifiedLogTest {
(log, segmentWithOverflow)
}
+
+ private def assertFetchOffsetByTimestamp(log: UnifiedLog, remoteLogManagerOpt: Option[RemoteLogManager], expected: Option[TimestampAndOffset], timestamp: Long): Unit = {
+ val remoteOffsetReader = getRemoteOffsetReader(remoteLogManagerOpt)
+ val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, remoteOffsetReader)
+ assertTrue(offsetResultHolder.futureHolderOpt.isPresent)
+ offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS)
+ assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone)
+ assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().hasTimestampAndOffset)
+ assertEquals(expected.get, offsetResultHolder.futureHolderOpt.get.taskFuture.get().timestampAndOffset().orElse(null))
+ }
+
+ private def assertFetchOffsetBySpecialTimestamp(log: UnifiedLog, remoteLogManagerOpt: Option[RemoteLogManager], expected: TimestampAndOffset, timestamp: Long): Unit = {
+ val remoteOffsetReader = getRemoteOffsetReader(remoteLogManagerOpt)
+ val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, remoteOffsetReader)
+ assertEquals(new OffsetResultHolder(expected), offsetResultHolder)
+ }
+
+ private def getRemoteOffsetReader(remoteLogManagerOpt: Option[Any]): Optional[AsyncOffsetReader] = {
+ remoteLogManagerOpt match {
+ case Some(remoteLogManager) => Optional.of(remoteLogManager.asInstanceOf[AsyncOffsetReader])
+ case None => Optional.empty[AsyncOffsetReader]()
+ }
+ }
+
+ private def prepareLogWithSequentialRecords(log: UnifiedLog, recordCount: Int): Seq[TimestampAndEpoch] = {
+ val firstTimestamp = mockTime.milliseconds()
+
+ (0 until recordCount).map { i =>
+ val timestampAndEpoch = TimestampAndEpoch(firstTimestamp + i, i)
+ log.appendAsLeader(
+ TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = timestampAndEpoch.timestamp),
+ timestampAndEpoch.leaderEpoch
+ )
+ timestampAndEpoch
+ }
+ }
+
+ case class TimestampAndEpoch(timestamp: Long, leaderEpoch: Int)
}
object UnifiedLogTest {
diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala
index 1d0dfe019d8..046ef52a7de 100644
--- a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala
+++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala
@@ -1180,4 +1180,564 @@ class AbstractFetcherThreadTest {
fetcher.processFetchRequest(partitionData, fetchRequestOpt)
assertEquals(0, replicaState.logEndOffset, "FetchResponse should be ignored when leader epoch does not match")
}
+
+ private def emptyReplicaState(rlmEnabled: Boolean, partition: TopicPartition, fetcher: MockFetcherThread): PartitionState = {
+ // Follower begins with an empty log
+ val replicaState = PartitionState(Seq(), leaderEpoch = 0, highWatermark = 0L, rlmEnabled = rlmEnabled)
+ fetcher.setReplicaState(partition, replicaState)
+ fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), fetchOffset = 0, leaderEpoch = 0)))
+ replicaState
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Disabled and Leader LogStartOffset = 0
+ *
+ * Purpose:
+ * - Simulate a leader with logs starting at offset 0 and validate how the follower
+ * behaves when TieredStorage is disabled.
+ *
+ * Conditions:
+ * - TieredStorage: **Disabled**
+ * - Leader LogStartOffset: **0**
+ *
+ * Scenario:
+ * - The leader starts with a log at offset 0, containing three record batches with base offsets 0, 150, and 199.
+ * - The follower begins fetching, and we validate the correctness of its replica state as it fetches.
+ *
+ * Expected Outcomes:
+ * 1. The follower fetch state should transition to `FETCHING` initially.
+ * 2. After the first poll, one record batch is fetched.
+ * 3. After subsequent polls, the entire leader log is fetched:
+ * - Replica log size: 3
+ * - Replica LogStartOffset: 0
+ * - Replica LogEndOffset: 200
+ * - Replica HighWatermark: 199
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageDisabledLeaderLogStartOffsetZero(): Unit = {
+ val rlmEnabled = false
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LogStartOffset = LocalLogStartOffset = 0
+ mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes))
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 199L,
+ rlmEnabled = rlmEnabled
+ )
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(1, replicaState.log.size)
+ assertEquals(0, replicaState.logStartOffset)
+ assertEquals(1, replicaState.logEndOffset)
+ assertEquals(Some(1), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Only one record batch is returned per poll, so call doWork() multiple times to fetch the remaining batches.
+ for (_ <- 1 to 2) fetcher.doWork()
+ assertEquals(3, replicaState.log.size)
+ assertEquals(0, replicaState.logStartOffset)
+ assertEquals(200, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Disabled and Leader LogStartOffset != 0
+ *
+ * Purpose:
+ * - Validate follower behavior when the leader's log starts at a non-zero offset (10).
+ *
+ * Conditions:
+ * - TieredStorage: **Disabled**
+ * - Leader LogStartOffset: **10**
+ *
+ * Scenario:
+ * - The leader log starts at offset 10 with batches at 10, 150, and 199.
+ * - The follower starts fetching from offset 10.
+ *
+ * Expected Outcomes:
+ * 1. The follower's initial log is empty.
+ * 2. Replica offsets after polls:
+ * - LogStartOffset = 10
+ * - LogEndOffset = 200
+ * - HighWatermark = 199
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageDisabledLeaderLogStartOffsetNonZero(): Unit = {
+ val rlmEnabled = false
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LogStartOffset = LocalLogStartOffset = 10
+ mkBatch(baseOffset = 10, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes))
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 199L,
+ rlmEnabled = rlmEnabled
+ )
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+ // Follower gets out-of-range error (no messages received), fetch offset is updated from 0 to 10
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(10, replicaState.logStartOffset)
+ assertEquals(10, replicaState.logEndOffset)
+ assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Only one record batch is returned per poll, so call doWork() multiple times to fetch the remaining batches.
+ for (_ <- 1 to 3) fetcher.doWork()
+ assertEquals(3, replicaState.log.size)
+ assertEquals(10, replicaState.logStartOffset)
+ assertEquals(200, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset = 0, and No Local Deletions
+ *
+ * Purpose:
+ * - Simulate TieredStorage enabled and validate follower fetching behavior when the leader
+ * log starts at 0 and no segments have been uploaded or deleted locally.
+ *
+ * Conditions:
+ * - TieredStorage: **Enabled**
+ * - Leader LogStartOffset: **0**
+ * - Leader LocalLogStartOffset: **0** (No local segments deleted).
+ *
+ * Scenario:
+ * - The leader log contains three record batches at offsets 0, 150, and 199.
+ * - The follower starts fetching from offset 0.
+ *
+ * Expected Outcomes:
+ * 1. The replica log accurately reflects the leader's log:
+ * - LogStartOffset = 0
+ * - LocalLogStartOffset = 0
+ * - LogEndOffset = 200
+ * - HighWatermark = 199
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroNoLocalDeletions(): Unit = {
+ val rlmEnabled = true
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LogStartOffset = LocalLogStartOffset = 0
+ mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes))
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 199L,
+ rlmEnabled = rlmEnabled
+ )
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(1, replicaState.log.size)
+ assertEquals(0, replicaState.logStartOffset)
+ assertEquals(0, replicaState.localLogStartOffset)
+ assertEquals(1, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ assertEquals(Some(1), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Only one record batch is returned per poll, so call doWork() multiple times to fetch the remaining batches.
+ for (_ <- 1 to 2) fetcher.doWork()
+ assertEquals(3, replicaState.log.size)
+ assertEquals(0, replicaState.logStartOffset)
+ assertEquals(0, replicaState.localLogStartOffset)
+ assertEquals(200, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset = 0, and Local Deletions
+ *
+ * Purpose:
+ * - Simulate TieredStorage enabled with some segments uploaded and deleted locally, causing
+ * a difference between the leader's LogStartOffset (0) and LocalLogStartOffset (> 0).
+ *
+ * Conditions:
+ * - TieredStorage: **Enabled**
+ * - Leader LogStartOffset: **0**
+ * - Leader LocalLogStartOffset: **100** (Some segments deleted locally).
+ *
+ * Scenario:
+ * - The leader log starts at offset 0 but the local leader log starts at offset 100.
+ * - The follower fetch operation begins from offset 0.
+ *
+ * Expected Outcomes:
+ * 1. After offset adjustments for local deletions:
+ * - LogStartOffset = 0
+ * - LocalLogStartOffset = 100
+ * - LogEndOffset = 200
+ * - HighWatermark = 199
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroWithLocalDeletions(): Unit = {
+ val rlmEnabled = true
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LocalLogStartOffset = 100
+ mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes))
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 199L,
+ rlmEnabled = rlmEnabled
+ )
+ leaderState.logStartOffset = 0
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(100, replicaState.localLogStartOffset)
+ assertEquals(100, replicaState.logEndOffset)
+ assertEquals(Some(100), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Only one record batch is returned per poll, so call doWork() multiple times to fetch the remaining batches.
+ for (_ <- 1 to 3) fetcher.doWork()
+ assertEquals(3, replicaState.log.size)
+ assertEquals(0, replicaState.logStartOffset)
+ assertEquals(100, replicaState.localLogStartOffset)
+ assertEquals(200, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and No Local Deletions
+ *
+ * Purpose:
+ * - Simulate TieredStorage enabled and validate follower fetch behavior when the leader's log
+ * starts at a non-zero offset and no local deletions have occurred.
+ *
+ * Conditions:
+ * - TieredStorage: **Enabled**
+ * - Leader LogStartOffset: **10**
+ * - Leader LocalLogStartOffset: **10** (No deletions).
+ *
+ * Scenario:
+ * - The leader log starts at offset 10 with batches at 10, 150, and 199.
+ * - The follower starts fetching from offset 10.
+ *
+ * Expected Outcomes:
+ * 1. After fetching, the replica log matches the leader:
+ * - LogStartOffset = 10
+ * - LocalLogStartOffset = 10
+ * - LogEndOffset = 200
+ * - HighWatermark = 199
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroNoLocalDeletions(): Unit = {
+ val rlmEnabled = true
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LogStartOffset = LocalLogStartOffset = 10
+ mkBatch(baseOffset = 10, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes))
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 199L,
+ rlmEnabled = rlmEnabled,
+ )
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(10, replicaState.localLogStartOffset)
+ assertEquals(10, replicaState.logEndOffset)
+ assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Only one record batch is returned per poll, so call doWork() multiple times to fetch the remaining batches.
+ for (_ <- 1 to 3) fetcher.doWork()
+ assertEquals(3, replicaState.log.size)
+ assertEquals(10, replicaState.logStartOffset)
+ assertEquals(10, replicaState.localLogStartOffset)
+ assertEquals(200, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and Local Deletions
+ *
+ * Purpose:
+ * - Validate follower adjustments when the leader has log deletions causing
+ * LocalLogStartOffset > LogStartOffset.
+ *
+ * Conditions:
+ * - TieredStorage: **Enabled**
+ * - Leader LogStartOffset: **10**
+ * - Leader LocalLogStartOffset: **100** (All older segments deleted locally).
+ *
+ * Scenario:
+ * - The leader log starts at offset 10 but the local log starts at offset 100.
+ * - The follower fetch starts at offset 10 but adjusts for local deletions.
+ *
+ * Expected Outcomes:
+ * 1. Initial fetch offset adjustments:
+ * - First adjustment: LogEndOffset = 10 (after offset-out-of-range error)
+ * - Second adjustment: LogEndOffset = 100 (after offset-moved-to-tiered-storage error)
+ * 2. After successful fetches:
+ * - 3 record batches fetched
+ * - LogStartOffset = 10
+ * - LocalLogStartOffset = 100
+ * - LogEndOffset = 200
+ * - HighWatermark = 199
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroWithLocalDeletions(): Unit = {
+ val rlmEnabled = true
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LocalLogStartOffset = 100
+ mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes))
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 199L,
+ rlmEnabled = rlmEnabled,
+ )
+ leaderState.logStartOffset = 10
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+ // On offset-out-of-range error, fetch offset is updated
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(10, replicaState.localLogStartOffset)
+ assertEquals(10, replicaState.logEndOffset)
+ assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ fetcher.doWork()
+ // On offset-moved-to-tiered-storage error, fetch offset is updated
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(100, replicaState.localLogStartOffset)
+ assertEquals(100, replicaState.logEndOffset)
+ assertEquals(Some(100), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Only one record batch is returned per poll, so call doWork() multiple times to fetch the remaining batches.
+ for (_ <- 1 to 3) fetcher.doWork()
+ assertEquals(3, replicaState.log.size)
+ assertEquals(10, replicaState.logStartOffset)
+ assertEquals(100, replicaState.localLogStartOffset)
+ assertEquals(200, replicaState.logEndOffset)
+ assertEquals(199, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Enabled, All Local Segments Deleted
+ *
+ * Purpose:
+ * - Handle scenarios where all local segments have been deleted:
+ * - LocalLogStartOffset > LogStartOffset.
+ * - LocalLogStartOffset = LogEndOffset.
+ *
+ * Conditions:
+ * - TieredStorage: **Enabled**
+ * - Leader LogStartOffset: **0 or > 0**
+ * - Leader LocalLogStartOffset: Leader LogEndOffset (all segments deleted locally).
+ *
+ * Expected Outcomes:
+ * 1. Follower state is adjusted to reflect local deletions:
+ * - LocalLogStartOffset = LogEndOffset.
+ * - No new data remains to fetch.
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroAllLocalSegmentsDeleted(): Unit = {
+ val rlmEnabled = true
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LocalLogStartOffset = 100
+ mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 151L,
+ rlmEnabled = rlmEnabled
+ )
+ leaderState.logStartOffset = 0
+ // Set Local Log Start Offset to Log End Offset
+ leaderState.localLogStartOffset = 151
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+
+ // On offset-moved-to-tiered-storage error, fetch offset is updated
+ fetcher.doWork()
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(151, replicaState.localLogStartOffset)
+ assertEquals(151, replicaState.logEndOffset)
+ assertEquals(151, replicaState.highWatermark)
+ assertEquals(Some(151), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Call once again to see if new data is received
+ fetcher.doWork()
+ // No metadata update expected
+ assertEquals(0, replicaState.log.size)
+ assertEquals(0, replicaState.logStartOffset)
+ assertEquals(151, replicaState.localLogStartOffset)
+ assertEquals(151, replicaState.logEndOffset)
+ assertEquals(151, replicaState.highWatermark)
+ }
+
+ /**
+ * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and All Local Segments Deleted
+ *
+ * Purpose:
+ * - Validate follower behavior when TieredStorage is enabled, the leader's log starts at a non-zero offset,
+ * and all local log segments have been deleted.
+ *
+ * Conditions:
+ * - TieredStorage: **Enabled**
+ * - Leader LogStartOffset: **10**
+ * - Leader LocalLogStartOffset: **151** (all older segments deleted locally).
+ *
+ * Scenario:
+ * - The leader log contains record batches starting at offset 100, but all local segments up to offset 151 have been deleted.
+ * - The follower starts at LogStartOffset = 10 and adjusts for local segment deletions.
+ *
+ * Expected Outcomes:
+ * 1. Follower detects offset adjustments due to local deletions:
+ * - LogStartOffset remains 10.
+ * - LocalLogStartOffset updates to 151.
+ * - LogEndOffset updates to 151.
+ * 2. HighWatermark aligns with the leader (151).
+ * 3. No new data is fetched since all relevant segments are deleted.
+ */
+ @Test
+ def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroAllLocalSegmentsDeleted(): Unit = {
+ val rlmEnabled = true
+ val partition = new TopicPartition("topic1", 0)
+ val mockLeaderEndpoint = new MockLeaderEndPoint(version = version)
+ val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint)
+ val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine)
+
+ val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher)
+
+ val leaderLog = Seq(
+ // LocalLogStartOffset = 100
+ mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)),
+ mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)),
+ )
+
+ val leaderState = PartitionState(
+ leaderLog,
+ leaderEpoch = 0,
+ highWatermark = 151L,
+ rlmEnabled = rlmEnabled
+ )
+ leaderState.logStartOffset = 10
+ // Set Local Log Start Offset to Log End Offset
+ leaderState.localLogStartOffset = 151
+ fetcher.mockLeader.setLeaderState(partition, leaderState)
+ fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
+
+ fetcher.doWork()
+
+ // On offset-out-of-range error, fetch offset is updated
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(10, replicaState.localLogStartOffset)
+ assertEquals(10, replicaState.logEndOffset)
+ assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // On offset-moved-to-tiered-storage error, fetch offset is updated
+ fetcher.doWork()
+ assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state))
+ assertEquals(0, replicaState.log.size)
+ assertEquals(151, replicaState.localLogStartOffset)
+ assertEquals(151, replicaState.logEndOffset)
+ assertEquals(151, replicaState.highWatermark)
+ assertEquals(Some(151), fetcher.fetchState(partition).map(_.fetchOffset()))
+
+ // Call once again to see if new data is received
+ fetcher.doWork()
+ // No metadata update expected
+ assertEquals(0, replicaState.log.size)
+ assertEquals(10, replicaState.logStartOffset)
+ assertEquals(151, replicaState.localLogStartOffset)
+ assertEquals(151, replicaState.logEndOffset)
+ assertEquals(151, replicaState.highWatermark)
+ }
}
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
index 9c2916d9293..c4b9d49d0ea 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -130,6 +130,10 @@ python docker_build_test.py kafka/test --image-tag=3.6.0 --image-type=jvm --kafk
```
python docker_build_test.py kafka/test --image-tag=3.8.0 --image-type=native --kafka-url=https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz
```
+- Example(local build archive with jvm or native image type) :- To build and test an image named test from a local build archive
+```
+python docker_build_test.py kafka/test --image-tag=local-build --image-type=<jvm or native> --kafka-archive=<absolute path to kafka archive>
+```
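For illustration (not part of the patch), a concrete invocation with a hypothetical archive path might look like:
```
python docker_build_test.py kafka/test --image-tag=local-build --image-type=jvm --kafka-archive=/absolute/path/to/kafka_2.13-4.1.0.tgz
```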
Creating a Release Candidate
----------------------------
diff --git a/docker/common.py b/docker/common.py
index 9c0f901823f..f04a484a187 100644
--- a/docker/common.py
+++ b/docker/common.py
@@ -33,12 +33,14 @@ def get_input(message):
raise ValueError("This field cannot be empty")
return value
-def build_docker_image_runner(command, image_type):
+def build_docker_image_runner(command, image_type, kafka_archive=None):
temp_dir_path = tempfile.mkdtemp()
current_dir = os.path.dirname(os.path.realpath(__file__))
copy_tree(f"{current_dir}/{image_type}", f"{temp_dir_path}/{image_type}")
copy_tree(f"{current_dir}/resources", f"{temp_dir_path}/{image_type}/resources")
copy_file(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}")
+ if kafka_archive:
+ copy_file(kafka_archive, f"{temp_dir_path}/{image_type}/kafka.tgz")
command = command.replace("$DOCKER_FILE", f"{temp_dir_path}/{image_type}/Dockerfile")
command = command.replace("$DOCKER_DIR", f"{temp_dir_path}/{image_type}")
try:
diff --git a/docker/docker_build_test.py b/docker/docker_build_test.py
index 793148573f3..fab6e65263d 100755
--- a/docker/docker_build_test.py
+++ b/docker/docker_build_test.py
@@ -25,9 +25,11 @@ Usage:
Example command:-
docker_build_test.py <image_name> --image-tag <image_tag> --image-type <image_type> --kafka-url <kafka_url>
+ docker_build_test.py <image_name> --image-tag <image_tag> --image-type <image_type> --kafka-archive <kafka_archive>
This command will build an image with <image_name> as image name, <image_tag> as image_tag (it will be latest by default),
<image_type> as image type (jvm by default), <kafka_url> for the kafka inside the image and run tests on the image.
+ <kafka_archive> can be passed as an alternative to <kafka_url> to use a local kafka archive. The path of kafka_archive should be absolute.
-b can be passed as additional argument if you just want to build the image.
-t can be passed if you just want to run tests on the image.
"""
@@ -41,10 +43,6 @@ from common import execute, build_docker_image_runner
import tempfile
import os
-def build_docker_image(image, tag, kafka_url, image_type):
- image = f'{image}:{tag}'
- build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {image} --build-arg kafka_url={kafka_url} --build-arg build_date={date.today()} $DOCKER_DIR", image_type)
-
def run_docker_tests(image, tag, kafka_url, image_type):
temp_dir_path = tempfile.mkdtemp()
try:
@@ -69,16 +67,20 @@ if __name__ == '__main__':
parser.add_argument("image", help="Image name that you want to keep for the Docker image")
parser.add_argument("--image-tag", "-tag", default="latest", dest="tag", help="Image tag that you want to add to the image")
parser.add_argument("--image-type", "-type", choices=["jvm", "native"], default="jvm", dest="image_type", help="Image type you want to build")
- parser.add_argument("--kafka-url", "-u", dest="kafka_url", help="Kafka url to be used to download kafka binary tarball in the docker image")
parser.add_argument("--build", "-b", action="store_true", dest="build_only", default=False, help="Only build the image, don't run tests")
parser.add_argument("--test", "-t", action="store_true", dest="test_only", default=False, help="Only run the tests, don't build the image")
+
+ archive_group = parser.add_mutually_exclusive_group(required=True)
+ archive_group.add_argument("--kafka-url", "-u", dest="kafka_url", help="Kafka url to be used to download kafka binary tarball in the docker image")
+ archive_group.add_argument("--kafka-archive", "-a", dest="kafka_archive", help="Kafka archive to be used to extract kafka binary tarball in the docker image")
+
args = parser.parse_args()
if args.build_only or not (args.build_only or args.test_only):
if args.kafka_url:
- build_docker_image(args.image, args.tag, args.kafka_url, args.image_type)
- else:
- raise ValueError("--kafka-url is a required argument for docker image")
+ build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {args.image}:{args.tag} --build-arg kafka_url={args.kafka_url} --build-arg build_date={date.today()} --no-cache --progress=plain $DOCKER_DIR", args.image_type)
+ elif args.kafka_archive:
+ build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {args.image}:{args.tag} --build-arg build_date={date.today()} --no-cache --progress=plain $DOCKER_DIR", args.image_type, args.kafka_archive)
if args.test_only or not (args.build_only or args.test_only):
run_docker_tests(args.image, args.tag, args.kafka_url, args.image_type)
diff --git a/docker/jvm/Dockerfile b/docker/jvm/Dockerfile
index f98f50a2e03..3d2f06820d4 100644
--- a/docker/jvm/Dockerfile
+++ b/docker/jvm/Dockerfile
@@ -23,20 +23,27 @@ USER root
# Get kafka from https://archive.apache.org/dist/kafka and pass the url through build arguments
ARG kafka_url
+ENV KAFKA_URL=$kafka_url
+
COPY jsa_launch /etc/kafka/docker/jsa_launch
COPY server.properties /etc/kafka/docker/server.properties
+COPY *kafka.tgz kafka.tgz
+
RUN set -eux ; \
apk update ; \
apk upgrade ; \
- apk add --no-cache wget gcompat gpg gpg-agent procps bash; \
+ apk add --no-cache bash; \
+ if [ -n "$KAFKA_URL" ]; then \
+ apk add --no-cache wget gcompat gpg gpg-agent procps; \
+ wget -nv -O kafka.tgz "$KAFKA_URL"; \
+ wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \
+ wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \
+ gpg --import KEYS; \
+ gpg --batch --verify kafka.tgz.asc kafka.tgz; \
+ fi; \
mkdir opt/kafka; \
- wget -nv -O kafka.tgz "$kafka_url"; \
- wget -nv -O kafka.tgz.asc "$kafka_url.asc"; \
- tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \
- wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \
- gpg --import KEYS; \
- gpg --batch --verify kafka.tgz.asc kafka.tgz
+ tar xfz kafka.tgz -C opt/kafka --strip-components 1;
# Generate jsa files using dynamic CDS for kafka server start command and kafka storage format command
RUN /etc/kafka/docker/jsa_launch
@@ -53,6 +60,9 @@ USER root
ARG kafka_url
ARG build_date
+ENV KAFKA_URL=$kafka_url
+
+COPY *kafka.tgz kafka.tgz
LABEL org.label-schema.name="kafka" \
org.label-schema.description="Apache Kafka" \
@@ -60,17 +70,25 @@ LABEL org.label-schema.name="kafka" \
org.label-schema.vcs-url="https://github.com/apache/kafka" \
maintainer="Apache Kafka"
-RUN set -eux ; \
+RUN mkdir opt/kafka; \
+ set -eux ; \
apk update ; \
apk upgrade ; \
- apk add --no-cache wget gcompat gpg gpg-agent procps bash; \
- mkdir opt/kafka; \
- wget -nv -O kafka.tgz "$kafka_url"; \
- wget -nv -O kafka.tgz.asc "$kafka_url.asc"; \
- tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \
- wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \
- gpg --import KEYS; \
- gpg --batch --verify kafka.tgz.asc kafka.tgz; \
+ apk add --no-cache bash; \
+ if [ -n "$KAFKA_URL" ]; then \
+ apk add --no-cache wget gcompat gpg gpg-agent procps; \
+ wget -nv -O kafka.tgz "$KAFKA_URL"; \
+ wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \
+ tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \
+ wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \
+ gpg --import KEYS; \
+ gpg --batch --verify kafka.tgz.asc kafka.tgz; \
+ rm kafka.tgz kafka.tgz.asc KEYS; \
+ apk del wget gpg gpg-agent; \
+ else \
+ tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \
+ rm kafka.tgz; \
+ fi; \
mkdir -p /var/lib/kafka/data /etc/kafka/secrets; \
mkdir -p /etc/kafka/docker /usr/logs /mnt/shared/config; \
adduser -h /home/appuser -D --shell /bin/bash appuser; \
@@ -79,8 +97,6 @@ RUN set -eux ; \
chmod -R ug+w /etc/kafka /var/lib/kafka /etc/kafka/secrets; \
cp /opt/kafka/config/log4j2.yaml /etc/kafka/docker/log4j2.yaml; \
cp /opt/kafka/config/tools-log4j2.yaml /etc/kafka/docker/tools-log4j2.yaml; \
- rm kafka.tgz kafka.tgz.asc KEYS; \
- apk del wget gpg gpg-agent; \
apk cache clean;
COPY server.properties /etc/kafka/docker/server.properties
diff --git a/docker/native/Dockerfile b/docker/native/Dockerfile
index ca85f35562d..010edbcd51c 100644
--- a/docker/native/Dockerfile
+++ b/docker/native/Dockerfile
@@ -29,15 +29,18 @@ ENV TARGET_PATH="$KAFKA_DIR/kafka.Kafka"
COPY native-image-configs $NATIVE_CONFIGS_DIR
COPY native_command.sh native_command.sh
-RUN mkdir $KAFKA_DIR; \
- microdnf install wget; \
- wget -nv -O kafka.tgz "$KAFKA_URL"; \
- wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \
+COPY *kafka.tgz /app
+
+RUN if [ -n "$KAFKA_URL" ]; then \
+ microdnf install wget; \
+ wget -nv -O kafka.tgz "$KAFKA_URL"; \
+ wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \
+ wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \
+ gpg --import KEYS; \
+ gpg --batch --verify kafka.tgz.asc kafka.tgz; \
+ fi; \
+ mkdir $KAFKA_DIR; \
tar xfz kafka.tgz -C $KAFKA_DIR --strip-components 1; \
- wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \
- gpg --import KEYS; \
- gpg --batch --verify kafka.tgz.asc kafka.tgz; \
- rm kafka.tgz ; \
# Build the native-binary of the apache kafka using graalVM native-image.
/app/native_command.sh $NATIVE_IMAGE_PATH $NATIVE_CONFIGS_DIR $KAFKA_LIBS_DIR $TARGET_PATH
diff --git a/docs/ops.html b/docs/ops.html
index 61007a30b8f..d6c0982854a 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -4116,11 +4116,9 @@ Feature: metadata.version SupportedMinVersion: 3.3-IV3 SupportedMaxVers
$ bin/kafka-storage.sh format -t KAFKA_CLUSTER_ID --feature kraft.version=1 -c controller.properties
- Cannot set kraft.version to 1 unless KIP-853 configuration is present. Try removing the --feature flag for kraft.version.
- Note: Currently it is not possible to convert clusters using a static controller quorum to
- use a dynamic controller quorum. This function will be supported in the future release.
+ Note: To migrate from a static voter set to a dynamic voter set, please refer to the Upgrade section.
If a dynamic controller cluster already exists, it can be expanded by first provisioning a new controller using the kafka-storage.sh tool and starting the controller.
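As an illustrative sketch (not taken from this patch; the flag names follow the KIP-853 storage tooling and should be verified against your release), formatting an additional controller for an existing dynamic quorum might look like:
$ bin/kafka-storage.sh format -t KAFKA_CLUSTER_ID --feature kraft.version=1 --no-initial-controllers -c controller.properties
Once the new controller is started, it can be added to the running quorum, for example via the kafka-metadata-quorum.sh add-controller command.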
@@ -4318,7 +4316,7 @@ $ bin/kafka-topics.sh --create --topic tieredTopic --bootstrap-server localhost:
Try to send messages to the `tieredTopic` topic to roll the log segment:
-$ bin/kafka-producer-perf-test.sh --topic tieredTopic --num-records 1200 --record-size 1024 --throughput -1 --producer-props bootstrap.servers=localhost:9092
+$ bin/kafka-producer-perf-test.sh --bootstrap-server localhost:9092 --topic tieredTopic --num-records 1200 --record-size 1024 --throughput -1
Then, after the active segment is rolled, the old segment should be moved to the remote storage and get deleted.
This can be verified by checking the remote log directory configured above. For example:
diff --git a/docs/upgrade.html b/docs/upgrade.html
index f0f3f540738..1dbb7e2d2ee 100644
--- a/docs/upgrade.html
+++ b/docs/upgrade.html
@@ -40,6 +40,10 @@
The PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG
in ProducerConfig
was deprecated and will be removed in Kafka 5.0. Please use the PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG
instead.
+
+ The ConsumerPerformance command line tool has a new include option that is an alternative to the topic option.
+ This new option allows passing a regular expression that specifies a list of topics to include for consumption, which is useful for testing consumer performance across multiple topics or dynamically matching topic sets.
+
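For illustration (not part of the patch), the new option could be exercised through the kafka-consumer-perf-test.sh wrapper roughly as follows; the topic pattern and message count are made up:
$ bin/kafka-consumer-perf-test.sh --bootstrap-server localhost:9092 --include 'perf-topic-.*' --messages 100000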
diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle
index bd957d7f8a7..6d333ab03f6 100644
--- a/gradle/dependencies.gradle
+++ b/gradle/dependencies.gradle
@@ -105,7 +105,7 @@ versions += [
kafka_38: "3.8.1",
kafka_39: "3.9.1",
kafka_40: "4.0.0",
- log4j2: "2.24.3",
+ log4j2: "2.25.1",
// When updating lz4 make sure the compression levels in org.apache.kafka.common.record.CompressionType are still valid
lz4: "1.8.0",
mavenArtifact: "3.9.6",
diff --git a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
index 44380427284..f87af4897a7 100644
--- a/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
+++ b/group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupMetadataManager.java
@@ -833,19 +833,28 @@ public class GroupMetadataManager {
* Gets or creates a streams group without updating the groups map.
* The group will be materialized during the replay.
*
+ * If there is an empty classic group of the same name, it will be deleted and a new streams
+ * group will be created.
+ *
* @param groupId The group ID.
+ * @param records The record list to which the group tombstones are written
+ * if the group is empty and is a classic group.
*
* @return A StreamsGroup.
*
* Package private for testing.
*/
StreamsGroup getOrCreateStreamsGroup(
- String groupId
+ String groupId,
+ List<CoordinatorRecord> records
) {
Group group = groups.get(groupId);
if (group == null) {
return new StreamsGroup(logContext, snapshotRegistry, groupId, metrics);
+ } else if (maybeDeleteEmptyClassicGroup(group, records)) {
+ log.info("[GroupId {}] Converted the empty classic group to a streams group.", groupId);
+ return new StreamsGroup(logContext, snapshotRegistry, groupId, metrics);
} else {
return castToStreamsGroup(group);
}
@@ -1871,7 +1880,7 @@ public class GroupMetadataManager {
boolean isJoining = memberEpoch == 0;
StreamsGroup group;
if (isJoining) {
- group = getOrCreateStreamsGroup(groupId);
+ group = getOrCreateStreamsGroup(groupId, records);
throwIfStreamsGroupIsFull(group);
} else {
group = getStreamsGroupOrThrow(groupId);
@@ -6066,7 +6075,11 @@ public class GroupMetadataManager {
// classicGroupJoinToConsumerGroup takes the join requests to non-empty consumer groups.
// The empty consumer groups should be converted to classic groups in classicGroupJoinToClassicGroup.
return classicGroupJoinToConsumerGroup((ConsumerGroup) group, context, request, responseFuture);
- } else if (group.type() == CONSUMER || group.type() == CLASSIC) {
+ } else if (group.type() == CONSUMER || group.type() == CLASSIC || group.type() == STREAMS && group.isEmpty()) {
+ // classicGroupJoinToClassicGroup accepts:
+ // - classic groups
+ // - empty streams groups
+ // - empty consumer groups
return classicGroupJoinToClassicGroup(context, request, responseFuture);
} else {
// Group exists but it's not a consumer group
@@ -6107,6 +6120,8 @@ public class GroupMetadataManager {
ClassicGroup group;
if (maybeDeleteEmptyConsumerGroup(groupId, records)) {
log.info("[GroupId {}] Converted the empty consumer group to a classic group.", groupId);
+ } else if (maybeDeleteEmptyStreamsGroup(groupId, records)) {
+ log.info("[GroupId {}] Converted the empty streams group to a classic group.", groupId);
}
boolean isNewGroup = !groups.containsKey(groupId);
try {
@@ -8398,6 +8413,13 @@ public class GroupMetadataManager {
return group != null && group.type() == CONSUMER && group.isEmpty();
}
+ /**
+ * @return true if the group is an empty streams group.
+ */
+ private static boolean isEmptyStreamsGroup(Group group) {
+ return group != null && group.type() == STREAMS && group.isEmpty();
+ }
+
/**
* Write tombstones for the group if it's empty and is a classic group.
*
@@ -8435,6 +8457,26 @@ public class GroupMetadataManager {
}
return false;
}
+
+ /**
+ * Delete and write tombstones for the group if it's empty and is a streams group.
+ *
+ * @param groupId The group id to be deleted.
+ * @param records The list of records to delete the group.
+ *
+ * @return true if the group is an empty streams group.
+ */
+ private boolean maybeDeleteEmptyStreamsGroup(String groupId, List<CoordinatorRecord> records) {
+ Group group = groups.get(groupId, Long.MAX_VALUE);
+ if (isEmptyStreamsGroup(group)) {
+ // Add tombstones for the previous streams group. The tombstones won't actually be
+ // replayed because its coordinator result has a non-null appendFuture.
+ createGroupTombstoneRecords(group, records);
+ removeGroup(groupId);
+ return true;
+ }
+ return false;
+ }
/**
* Checks whether the given protocol type or name in the request is inconsistent with the group's.
diff --git a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java
index 3f1e49b955d..efe2ad96435 100644
--- a/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java
+++ b/group-coordinator/src/test/java/org/apache/kafka/coordinator/group/GroupMetadataManagerTest.java
@@ -18633,6 +18633,156 @@ public class GroupMetadataManagerTest {
assertNull(result.response().data().partitionsByUserEndpoint());
}
+ @Test
+ public void testStreamsGroupHeartbeatWithNonEmptyClassicGroup() {
+ String classicGroupId = "classic-group-id";
+ String memberId = Uuid.randomUuid().toString();
+
+ String subtopology1 = "subtopology1";
+ String fooTopicName = "foo";
+ Topology topology = new Topology().setSubtopologies(List.of(
+ new Subtopology().setSubtopologyId(subtopology1).setSourceTopics(List.of(fooTopicName))
+ ));
+
+ GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().build();
+ ClassicGroup classicGroup = new ClassicGroup(
+ new LogContext(),
+ classicGroupId,
+ EMPTY,
+ context.time
+ );
+ context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment()));
+
+ context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false).transitionTo(PREPARING_REBALANCE);
+ assertThrows(GroupIdNotFoundException.class, () ->
+ context.streamsGroupHeartbeat(
+ new StreamsGroupHeartbeatRequestData()
+ .setGroupId(classicGroupId)
+ .setMemberId(memberId)
+ .setMemberEpoch(0)
+ .setRebalanceTimeoutMs(12000)
+ .setTopology(topology)
+ .setActiveTasks(List.of())
+ .setStandbyTasks(List.of())
+ .setWarmupTasks(List.of())));
+ }
+
+ @Test
+ public void testStreamsGroupHeartbeatWithEmptyClassicGroup() {
+ String classicGroupId = "classic-group-id";
+ String memberId = Uuid.randomUuid().toString();
+ String fooTopicName = "foo";
+ String subtopology1 = "subtopology1";
+ Topology topology = new Topology().setSubtopologies(List.of(
+ new Subtopology().setSubtopologyId(subtopology1).setSourceTopics(List.of(fooTopicName))
+ ));
+
+ MockTaskAssignor assignor = new MockTaskAssignor("sticky");
+ GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
+ .withStreamsGroupTaskAssignors(List.of(assignor))
+ .build();
+ ClassicGroup classicGroup = new ClassicGroup(
+ new LogContext(),
+ classicGroupId,
+ EMPTY,
+ context.time
+ );
+ context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment()));
+
+ CoordinatorResult<StreamsGroupHeartbeatResult, CoordinatorRecord> result = context.streamsGroupHeartbeat(
+ new StreamsGroupHeartbeatRequestData()
+ .setGroupId(classicGroupId)
+ .setMemberId(memberId)
+ .setMemberEpoch(0)
+ .setRebalanceTimeoutMs(12000)
+ .setTopology(topology)
+ .setActiveTasks(List.of())
+ .setStandbyTasks(List.of())
+ .setWarmupTasks(List.of()));
+
+ StreamsGroupMember expectedMember = StreamsGroupMember.Builder.withDefaults(memberId)
+ .setState(org.apache.kafka.coordinator.group.streams.MemberState.STABLE)
+ .setMemberEpoch(1)
+ .setPreviousMemberEpoch(0)
+ .setRebalanceTimeoutMs(5000)
+ .setClientId(DEFAULT_CLIENT_ID)
+ .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
+ .setAssignedTasks(TasksTuple.EMPTY)
+ .setTasksPendingRevocation(TasksTuple.EMPTY)
+ .setRebalanceTimeoutMs(12000)
+ .setTopologyEpoch(0)
+ .build();
+
+ assertEquals(Errors.NONE.code(), result.response().data().errorCode());
+ assertEquals(
+ List.of(
+ GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(classicGroupId),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupMemberRecord(classicGroupId, expectedMember),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecord(classicGroupId, topology),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupEpochRecord(classicGroupId, 1, 0),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentRecord(classicGroupId, memberId, TasksTuple.EMPTY),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentEpochRecord(classicGroupId, 1),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupCurrentAssignmentRecord(classicGroupId, expectedMember)
+ ),
+ result.records()
+ );
+ assertEquals(
+ Group.GroupType.STREAMS,
+ context.groupMetadataManager.streamsGroup(classicGroupId).type()
+ );
+ }
+
+ @Test
+ public void testClassicGroupJoinWithEmptyStreamsGroup() throws Exception {
+ String streamsGroupId = "streams-group-id";
+ GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
+ .withStreamsGroup(new StreamsGroupBuilder(streamsGroupId, 10))
+ .build();
+
+ JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
+ .withGroupId(streamsGroupId)
+ .withMemberId(UNKNOWN_MEMBER_ID)
+ .withDefaultProtocolTypeAndProtocols()
+ .build();
+ GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request, true);
+
+ List<CoordinatorRecord> expectedRecords = List.of(
+ StreamsCoordinatorRecordHelpers.newStreamsGroupTargetAssignmentEpochTombstoneRecord(streamsGroupId),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupEpochTombstoneRecord(streamsGroupId),
+ StreamsCoordinatorRecordHelpers.newStreamsGroupTopologyRecordTombstone(streamsGroupId)
+ );
+
+ assertEquals(Errors.MEMBER_ID_REQUIRED.code(), joinResult.joinFuture.get().errorCode());
+ assertEquals(expectedRecords, joinResult.records.subList(0, expectedRecords.size()));
+ assertEquals(
+ Group.GroupType.CLASSIC,
+ context.groupMetadataManager.getOrMaybeCreateClassicGroup(streamsGroupId, false).type()
+ );
+ }
+
+ @Test
+ public void testClassicGroupJoinWithNonEmptyStreamsGroup() throws Exception {
+ String streamsGroupId = "streams-group-id";
+ String memberId = Uuid.randomUuid().toString();
+ GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
+ .withStreamsGroup(new StreamsGroupBuilder(streamsGroupId, 10)
+ .withMember(StreamsGroupMember.Builder.withDefaults(memberId)
+ .setState(org.apache.kafka.coordinator.group.streams.MemberState.STABLE)
+ .setMemberEpoch(10)
+ .setPreviousMemberEpoch(10)
+ .build()))
+ .build();
+
+ JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
+ .withGroupId(streamsGroupId)
+ .withMemberId(UNKNOWN_MEMBER_ID)
+ .withDefaultProtocolTypeAndProtocols()
+ .build();
+
+ GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request);
+ assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL.code(), joinResult.joinFuture.get().errorCode());
+ }
+
@Test
public void testConsumerGroupDynamicConfigs() {
String groupId = "fooup";
diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/StreamsStickyAssignorBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/StreamsStickyAssignorBenchmark.java
index 3293c0223f2..22863fef5fa 100644
--- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/StreamsStickyAssignorBenchmark.java
+++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/assignor/StreamsStickyAssignorBenchmark.java
@@ -44,7 +44,6 @@ import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -107,7 +106,7 @@ public class StreamsStickyAssignorBenchmark {
taskAssignor = new StickyTaskAssignor();
Map<String, AssignmentMemberSpec> members = createMembers();
- this.assignmentConfigs = Collections.singletonMap(
+ this.assignmentConfigs = Map.of(
"num.standby.replicas",
Integer.toString(standbyReplicas)
);
@@ -138,7 +137,7 @@ public class StreamsStickyAssignorBenchmark {
for (Map.Entry<String, AssignmentMemberSpec> member : groupSpec.members().entrySet()) {
MemberAssignment memberAssignment = members.getOrDefault(
member.getKey(),
- new MemberAssignment(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap())
+ new MemberAssignment(Map.of(), Map.of(), Map.of())
);
updatedMemberSpec.put(member.getKey(), new AssignmentMemberSpec(
diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java
index 68f3a52918c..e0fd3d8c8d4 100644
--- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java
+++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/core/TestPurgatoryPerformance.java
@@ -30,7 +30,6 @@ import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
-import java.util.Objects;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
@@ -405,12 +404,7 @@ public class TestPurgatoryPerformance {
}
- private static class Scheduled implements Delayed {
- final FakeOperation operation;
-
- public Scheduled(FakeOperation operation) {
- this.operation = operation;
- }
+ private record Scheduled(FakeOperation operation) implements Delayed {
@Override
public long getDelay(TimeUnit unit) {
@@ -429,30 +423,11 @@ public class TestPurgatoryPerformance {
}
}
- private static class FakeOperationKey implements DelayedOperationKey {
- private final String key;
-
- public FakeOperationKey(String key) {
- this.key = key;
- }
-
+ private record FakeOperationKey(String key) implements DelayedOperationKey {
@Override
public String keyLabel() {
return key;
}
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- FakeOperationKey that = (FakeOperationKey) o;
- return Objects.equals(key, that.key);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(key);
- }
}
private static class FakeOperation extends DelayedOperation {
@@ -469,7 +444,6 @@ public class TestPurgatoryPerformance {
@Override
public void onExpiration() {
-
}
@Override
diff --git a/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java b/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java
index b3b4dc9414f..8114c9c6f21 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/BrokerControlStates.java
@@ -17,39 +17,5 @@
package org.apache.kafka.controller;
-import java.util.Objects;
-
-
-class BrokerControlStates {
- private final BrokerControlState current;
- private final BrokerControlState next;
-
- BrokerControlStates(BrokerControlState current, BrokerControlState next) {
- this.current = current;
- this.next = next;
- }
-
- BrokerControlState current() {
- return current;
- }
-
- BrokerControlState next() {
- return next;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(current, next);
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof BrokerControlStates other)) return false;
- return other.current == current && other.next == next;
- }
-
- @Override
- public String toString() {
- return "BrokerControlStates(current=" + current + ", next=" + next + ")";
- }
+record BrokerControlStates(BrokerControlState current, BrokerControlState next) {
}
diff --git a/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java b/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java
index 7ef75c4e3b3..ddb944ee77c 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/BrokerIdAndEpoch.java
@@ -17,41 +17,5 @@
package org.apache.kafka.controller;
-import java.util.Objects;
-
-public class BrokerIdAndEpoch {
- private final int id;
- private final long epoch;
-
- public BrokerIdAndEpoch(
- int id,
- long epoch
- ) {
- this.id = id;
- this.epoch = epoch;
- }
-
- public int id() {
- return id;
- }
-
- public long epoch() {
- return epoch;
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null || (!(o instanceof BrokerIdAndEpoch other))) return false;
- return id == other.id && epoch == other.epoch;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(id, epoch);
- }
-
- @Override
- public String toString() {
- return "BrokerIdAndEpoch(id=" + id + ", epoch=" + epoch + ")";
- }
+public record BrokerIdAndEpoch(int id, long epoch) {
}
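The conversions above lean on the members the compiler derives for a Java record. A minimal sketch, using a local record of the same shape as the converted class (the enclosing demo class is hypothetical), shows what the language provides for free; note that the generated toString uses the ClassName[component=value] form rather than the parenthesized form the hand-written code produced.

public class RecordSemanticsDemo {
    // Same shape as the converted class; accessors, equals, hashCode and
    // toString are all generated by the compiler.
    record BrokerIdAndEpoch(int id, long epoch) { }

    public static void main(String[] args) {
        BrokerIdAndEpoch a = new BrokerIdAndEpoch(1, 42L);
        BrokerIdAndEpoch b = new BrokerIdAndEpoch(1, 42L);
        System.out.println(a.id() + " " + a.epoch());      // generated accessors
        System.out.println(a.equals(b));                    // true: component-wise equality
        System.out.println(a.hashCode() == b.hashCode());   // true
        System.out.println(a);                              // BrokerIdAndEpoch[id=1, epoch=42]
    }
}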
diff --git a/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java b/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java
index a1d35293bd1..8697ad00962 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/PartitionChangeBuilder.java
@@ -421,8 +421,8 @@ public class PartitionChangeBuilder {
PartitionReassignmentReplicas.CompletedReassignment completedReassignment = completedReassignmentOpt.get();
- targetIsr = completedReassignment.isr;
- targetReplicas = completedReassignment.replicas;
+ targetIsr = completedReassignment.isr();
+ targetReplicas = completedReassignment.replicas();
targetRemoving = List.of();
targetAdding = List.of();
}
diff --git a/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java b/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java
index 51fdf42bb67..5f85dd1a3d6 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/PartitionReassignmentReplicas.java
@@ -129,14 +129,7 @@ class PartitionReassignmentReplicas {
);
}
- static class CompletedReassignment {
- final List replicas;
- final List isr;
-
- public CompletedReassignment(List replicas, List isr) {
- this.replicas = replicas;
- this.isr = isr;
- }
+ record CompletedReassignment(List replicas, List isr) {
}
List originalReplicas() {
diff --git a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
index fe561244dab..a1e93b3f10f 100644
--- a/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
+++ b/metadata/src/main/java/org/apache/kafka/controller/ReplicationControlManager.java
@@ -2441,14 +2441,7 @@ public class ReplicationControlManager {
}
}
- private static final class IneligibleReplica {
- private final int replicaId;
- private final String reason;
-
- private IneligibleReplica(int replicaId, String reason) {
- this.replicaId = replicaId;
- this.reason = reason;
- }
+ private record IneligibleReplica(int replicaId, String reason) {
@Override
public String toString() {
diff --git a/metadata/src/main/java/org/apache/kafka/image/AclsImage.java b/metadata/src/main/java/org/apache/kafka/image/AclsImage.java
index 12f778b841e..86112fa22fb 100644
--- a/metadata/src/main/java/org/apache/kafka/image/AclsImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/AclsImage.java
@@ -30,14 +30,12 @@ import java.util.Map.Entry;
/**
* Represents the ACLs in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class AclsImage {
+public record AclsImage(Map acls) {
public static final AclsImage EMPTY = new AclsImage(Map.of());
- private final Map acls;
-
public AclsImage(Map acls) {
this.acls = Collections.unmodifiableMap(acls);
}
@@ -46,10 +44,6 @@ public final class AclsImage {
return acls.isEmpty();
}
- public Map acls() {
- return acls;
- }
-
public void write(ImageWriter writer) {
for (Entry entry : acls.entrySet()) {
StandardAclWithId aclWithId = new StandardAclWithId(entry.getKey(), entry.getValue());
@@ -57,17 +51,6 @@ public final class AclsImage {
}
}
- @Override
- public int hashCode() {
- return acls.hashCode();
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof AclsImage other)) return false;
- return acls.equals(other.acls);
- }
-
@Override
public String toString() {
return new AclsImageNode(this).stringify();
diff --git a/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java b/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java
index 6cf23a2b688..e5dfb0433d0 100644
--- a/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/ClientQuotasImage.java
@@ -45,14 +45,12 @@ import static org.apache.kafka.common.requests.DescribeClientQuotasRequest.MATCH
/**
* Represents the client quotas in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class ClientQuotasImage {
+public record ClientQuotasImage(Map entities) {
public static final ClientQuotasImage EMPTY = new ClientQuotasImage(Map.of());
- private final Map entities;
-
public ClientQuotasImage(Map entities) {
this.entities = Collections.unmodifiableMap(entities);
}
@@ -61,11 +59,6 @@ public final class ClientQuotasImage {
return entities.isEmpty();
}
- // Visible for testing
- public Map entities() {
- return entities;
- }
-
public void write(ImageWriter writer) {
for (Entry entry : entities.entrySet()) {
ClientQuotaEntity entity = entry.getKey();
@@ -82,14 +75,14 @@ public final class ClientQuotasImage {
if (component.entityType().isEmpty()) {
throw new InvalidRequestException("Invalid empty entity type.");
} else if (exactMatch.containsKey(component.entityType()) ||
- typeMatch.contains(component.entityType())) {
+ typeMatch.contains(component.entityType())) {
throw new InvalidRequestException("Entity type " + component.entityType() +
" cannot appear more than once in the filter.");
}
if (!(component.entityType().equals(IP) || component.entityType().equals(USER) ||
- component.entityType().equals(CLIENT_ID))) {
+ component.entityType().equals(CLIENT_ID))) {
throw new UnsupportedVersionException("Unsupported entity type " +
- component.entityType());
+ component.entityType());
}
switch (component.matchType()) {
case MATCH_TYPE_EXACT:
@@ -119,7 +112,7 @@ public final class ClientQuotasImage {
}
if (exactMatch.containsKey(IP) || typeMatch.contains(IP)) {
if ((exactMatch.containsKey(USER) || typeMatch.contains(USER)) ||
- (exactMatch.containsKey(CLIENT_ID) || typeMatch.contains(CLIENT_ID))) {
+ (exactMatch.containsKey(CLIENT_ID) || typeMatch.contains(CLIENT_ID))) {
throw new InvalidRequestException("Invalid entity filter component " +
"combination. IP filter component should not be used with " +
"user or clientId filter component.");
@@ -173,17 +166,6 @@ public final class ClientQuotasImage {
return data;
}
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof ClientQuotasImage other)) return false;
- return entities.equals(other.entities);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(entities);
- }
-
@Override
public String toString() {
return new ClientQuotasImageNode(this).stringify();
diff --git a/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java b/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java
index 22166f1c2a3..1e4dbe20d7a 100644
--- a/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/ClusterImage.java
@@ -25,22 +25,16 @@ import org.apache.kafka.metadata.ControllerRegistration;
import java.util.Collections;
import java.util.Map;
-import java.util.Objects;
-
/**
* Represents the cluster in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class ClusterImage {
+public record ClusterImage(Map brokers, Map controllers) {
public static final ClusterImage EMPTY = new ClusterImage(
- Map.of(),
- Map.of());
-
- private final Map brokers;
-
- private final Map controllers;
+ Map.of(),
+ Map.of());
public ClusterImage(
Map brokers,
@@ -54,18 +48,10 @@ public final class ClusterImage {
return brokers.isEmpty();
}
- public Map brokers() {
- return brokers;
- }
-
public BrokerRegistration broker(int nodeId) {
return brokers.get(nodeId);
}
- public Map controllers() {
- return controllers;
- }
-
public long brokerEpoch(int brokerId) {
BrokerRegistration brokerRegistration = broker(brokerId);
if (brokerRegistration == null) {
@@ -89,18 +75,6 @@ public final class ClusterImage {
}
}
- @Override
- public int hashCode() {
- return Objects.hash(brokers, controllers);
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof ClusterImage other)) return false;
- return brokers.equals(other.brokers) &&
- controllers.equals(other.controllers);
- }
-
@Override
public String toString() {
return new ClusterImageNode(this).stringify();
diff --git a/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java b/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java
index c0757d351a7..8147a4b58a8 100644
--- a/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/ConfigurationImage.java
@@ -30,29 +30,10 @@ import java.util.Properties;
/**
* Represents the configuration of a resource.
- *
+ *
* This class is thread-safe.
*/
-public final class ConfigurationImage {
- private final ConfigResource resource;
-
- private final Map data;
-
- public ConfigurationImage(
- ConfigResource resource,
- Map data
- ) {
- this.resource = resource;
- this.data = data;
- }
-
- public ConfigResource resource() {
- return resource;
- }
-
- public Map data() {
- return data;
- }
+public record ConfigurationImage(ConfigResource resource, Map data) {
public boolean isEmpty() {
return data.isEmpty();
diff --git a/metadata/src/main/java/org/apache/kafka/image/DelegationTokenImage.java b/metadata/src/main/java/org/apache/kafka/image/DelegationTokenImage.java
index c2c90a4bbb9..41339fc667c 100644
--- a/metadata/src/main/java/org/apache/kafka/image/DelegationTokenImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/DelegationTokenImage.java
@@ -31,16 +31,14 @@ import java.util.Map.Entry;
/**
* Represents the DelegationToken credentials in the metadata image.
- *
+ *
* This class is thread-safe.
+ *
+ * @param tokens Map of TokenID to TokenInformation. The TokenID is also contained in the TokenInformation inside the DelegationTokenData.

*/
-public final class DelegationTokenImage {
+public record DelegationTokenImage(Map tokens) {
public static final DelegationTokenImage EMPTY = new DelegationTokenImage(Map.of());
- // Map TokenID to TokenInformation.
- // The TokenID is also contained in the TokenInformation inside the DelegationTokenData
- private final Map tokens;
-
public DelegationTokenImage(Map tokens) {
this.tokens = Collections.unmodifiableMap(tokens);
}
@@ -55,31 +53,14 @@ public final class DelegationTokenImage {
List tokenIds = new ArrayList<>(tokens.keySet());
String delegationTokenImageString = "DelegationTokenImage(" + String.join(", ", tokenIds) + ")";
options.handleLoss(delegationTokenImageString);
- }
+ }
}
}
- public Map tokens() {
- return tokens;
- }
-
public boolean isEmpty() {
return tokens.isEmpty();
}
- @Override
- public int hashCode() {
- return tokens.hashCode();
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null) return false;
- if (!o.getClass().equals(DelegationTokenImage.class)) return false;
- DelegationTokenImage other = (DelegationTokenImage) o;
- return tokens.equals(other.tokens);
- }
-
@Override
public String toString() {
return new DelegationTokenImageNode(this).stringify();
diff --git a/metadata/src/main/java/org/apache/kafka/image/LocalReplicaChanges.java b/metadata/src/main/java/org/apache/kafka/image/LocalReplicaChanges.java
index e24b1455cb2..317641e4534 100644
--- a/metadata/src/main/java/org/apache/kafka/image/LocalReplicaChanges.java
+++ b/metadata/src/main/java/org/apache/kafka/image/LocalReplicaChanges.java
@@ -92,26 +92,6 @@ public final class LocalReplicaChanges {
);
}
- public static final class PartitionInfo {
- private final Uuid topicId;
- private final PartitionRegistration partition;
-
- public PartitionInfo(Uuid topicId, PartitionRegistration partition) {
- this.topicId = topicId;
- this.partition = partition;
- }
-
- public Uuid topicId() {
- return topicId;
- }
-
- public PartitionRegistration partition() {
- return partition;
- }
-
- @Override
- public String toString() {
- return String.format("PartitionInfo(topicId = %s, partition = %s)", topicId, partition);
- }
+ public record PartitionInfo(Uuid topicId, PartitionRegistration partition) {
}
}
diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java
index da245f00519..99364c039e7 100644
--- a/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/MetadataImage.java
@@ -22,15 +22,16 @@ import org.apache.kafka.image.writer.ImageWriter;
import org.apache.kafka.image.writer.ImageWriterOptions;
import org.apache.kafka.server.common.OffsetAndEpoch;
-import java.util.Objects;
-
/**
* The broker metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class MetadataImage {
+public record MetadataImage(MetadataProvenance provenance, FeaturesImage features, ClusterImage cluster,
+ TopicsImage topics, ConfigurationsImage configs, ClientQuotasImage clientQuotas,
+ ProducerIdsImage producerIds, AclsImage acls, ScramImage scram,
+ DelegationTokenImage delegationTokens) {
public static final MetadataImage EMPTY = new MetadataImage(
MetadataProvenance.EMPTY,
FeaturesImage.EMPTY,
@@ -43,50 +44,6 @@ public final class MetadataImage {
ScramImage.EMPTY,
DelegationTokenImage.EMPTY);
- private final MetadataProvenance provenance;
-
- private final FeaturesImage features;
-
- private final ClusterImage cluster;
-
- private final TopicsImage topics;
-
- private final ConfigurationsImage configs;
-
- private final ClientQuotasImage clientQuotas;
-
- private final ProducerIdsImage producerIds;
-
- private final AclsImage acls;
-
- private final ScramImage scram;
-
- private final DelegationTokenImage delegationTokens;
-
- public MetadataImage(
- MetadataProvenance provenance,
- FeaturesImage features,
- ClusterImage cluster,
- TopicsImage topics,
- ConfigurationsImage configs,
- ClientQuotasImage clientQuotas,
- ProducerIdsImage producerIds,
- AclsImage acls,
- ScramImage scram,
- DelegationTokenImage delegationTokens
- ) {
- this.provenance = provenance;
- this.features = features;
- this.cluster = cluster;
- this.topics = topics;
- this.configs = configs;
- this.clientQuotas = clientQuotas;
- this.producerIds = producerIds;
- this.acls = acls;
- this.scram = scram;
- this.delegationTokens = delegationTokens;
- }
-
public boolean isEmpty() {
return features.isEmpty() &&
cluster.isEmpty() &&
@@ -99,10 +56,6 @@ public final class MetadataImage {
delegationTokens.isEmpty();
}
- public MetadataProvenance provenance() {
- return provenance;
- }
-
public OffsetAndEpoch highestOffsetAndEpoch() {
return new OffsetAndEpoch(provenance.lastContainedOffset(), provenance.lastContainedEpoch());
}
@@ -111,42 +64,6 @@ public final class MetadataImage {
return provenance.lastContainedOffset();
}
- public FeaturesImage features() {
- return features;
- }
-
- public ClusterImage cluster() {
- return cluster;
- }
-
- public TopicsImage topics() {
- return topics;
- }
-
- public ConfigurationsImage configs() {
- return configs;
- }
-
- public ClientQuotasImage clientQuotas() {
- return clientQuotas;
- }
-
- public ProducerIdsImage producerIds() {
- return producerIds;
- }
-
- public AclsImage acls() {
- return acls;
- }
-
- public ScramImage scram() {
- return scram;
- }
-
- public DelegationTokenImage delegationTokens() {
- return delegationTokens;
- }
-
public void write(ImageWriter writer, ImageWriterOptions options) {
// Features should be written out first so we can include the metadata.version at the beginning of the
// snapshot
@@ -162,37 +79,6 @@ public final class MetadataImage {
writer.close(true);
}
- @Override
- public boolean equals(Object o) {
- if (o == null || !o.getClass().equals(this.getClass())) return false;
- MetadataImage other = (MetadataImage) o;
- return provenance.equals(other.provenance) &&
- features.equals(other.features) &&
- cluster.equals(other.cluster) &&
- topics.equals(other.topics) &&
- configs.equals(other.configs) &&
- clientQuotas.equals(other.clientQuotas) &&
- producerIds.equals(other.producerIds) &&
- acls.equals(other.acls) &&
- scram.equals(other.scram) &&
- delegationTokens.equals(other.delegationTokens);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(
- provenance,
- features,
- cluster,
- topics,
- configs,
- clientQuotas,
- producerIds,
- acls,
- scram,
- delegationTokens);
- }
-
@Override
public String toString() {
return new MetadataImageNode(this).stringify();
diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataProvenance.java b/metadata/src/main/java/org/apache/kafka/image/MetadataProvenance.java
index e9cef3e70e6..fe6bf6f5333 100644
--- a/metadata/src/main/java/org/apache/kafka/image/MetadataProvenance.java
+++ b/metadata/src/main/java/org/apache/kafka/image/MetadataProvenance.java
@@ -20,48 +20,18 @@ package org.apache.kafka.image;
import org.apache.kafka.server.common.OffsetAndEpoch;
import org.apache.kafka.snapshot.Snapshots;
-import java.util.Objects;
-
/**
* Information about the source of a metadata image.
*/
-public final class MetadataProvenance {
+public record MetadataProvenance(long lastContainedOffset, int lastContainedEpoch, long lastContainedLogTimeMs,
+ boolean isOffsetBatchAligned) {
public static final MetadataProvenance EMPTY = new MetadataProvenance(-1L, -1, -1L, false);
- private final long lastContainedOffset;
- private final int lastContainedEpoch;
- private final long lastContainedLogTimeMs;
- private final boolean isOffsetBatchAligned;
-
- public MetadataProvenance(
- long lastContainedOffset,
- int lastContainedEpoch,
- long lastContainedLogTimeMs,
- boolean isOffsetBatchAligned
- ) {
- this.lastContainedOffset = lastContainedOffset;
- this.lastContainedEpoch = lastContainedEpoch;
- this.lastContainedLogTimeMs = lastContainedLogTimeMs;
- this.isOffsetBatchAligned = isOffsetBatchAligned;
- }
-
public OffsetAndEpoch snapshotId() {
return new OffsetAndEpoch(lastContainedOffset + 1, lastContainedEpoch);
}
- public long lastContainedOffset() {
- return lastContainedOffset;
- }
-
- public int lastContainedEpoch() {
- return lastContainedEpoch;
- }
-
- public long lastContainedLogTimeMs() {
- return lastContainedLogTimeMs;
- }
-
/**
* Returns whether lastContainedOffset is the last offset in a record batch
*/
@@ -75,32 +45,4 @@ public final class MetadataProvenance {
public String snapshotName() {
return String.format("snapshot %s", Snapshots.filenameFromSnapshotId(snapshotId()));
}
-
- @Override
- public boolean equals(Object o) {
- if (o == null || !o.getClass().equals(this.getClass())) return false;
- MetadataProvenance other = (MetadataProvenance) o;
- return lastContainedOffset == other.lastContainedOffset &&
- lastContainedEpoch == other.lastContainedEpoch &&
- lastContainedLogTimeMs == other.lastContainedLogTimeMs &&
- isOffsetBatchAligned == other.isOffsetBatchAligned;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(lastContainedOffset,
- lastContainedEpoch,
- lastContainedLogTimeMs,
- isOffsetBatchAligned);
- }
-
- @Override
- public String toString() {
- return "MetadataProvenance(" +
- "lastContainedOffset=" + lastContainedOffset +
- ", lastContainedEpoch=" + lastContainedEpoch +
- ", lastContainedLogTimeMs=" + lastContainedLogTimeMs +
- ", isOffsetBatchAligned=" + isOffsetBatchAligned +
- ")";
- }
}
diff --git a/metadata/src/main/java/org/apache/kafka/image/MetadataVersionChange.java b/metadata/src/main/java/org/apache/kafka/image/MetadataVersionChange.java
index d9b43a306e5..f03e1719950 100644
--- a/metadata/src/main/java/org/apache/kafka/image/MetadataVersionChange.java
+++ b/metadata/src/main/java/org/apache/kafka/image/MetadataVersionChange.java
@@ -21,28 +21,13 @@ import org.apache.kafka.server.common.MetadataVersion;
import java.util.Objects;
-
/**
* A change in the MetadataVersion.
*/
-public final class MetadataVersionChange {
- private final MetadataVersion oldVersion;
- private final MetadataVersion newVersion;
-
- public MetadataVersionChange(
- MetadataVersion oldVersion,
- MetadataVersion newVersion
- ) {
- this.oldVersion = Objects.requireNonNull(oldVersion);
- this.newVersion = Objects.requireNonNull(newVersion);
- }
-
- public MetadataVersion oldVersion() {
- return oldVersion;
- }
-
- public MetadataVersion newVersion() {
- return newVersion;
+public record MetadataVersionChange(MetadataVersion oldVersion, MetadataVersion newVersion) {
+ public MetadataVersionChange {
+ Objects.requireNonNull(oldVersion);
+ Objects.requireNonNull(newVersion);
}
public boolean isUpgrade() {
@@ -52,26 +37,4 @@ public final class MetadataVersionChange {
public boolean isDowngrade() {
return newVersion.isLessThan(oldVersion);
}
-
- @Override
- public boolean equals(Object o) {
- if (o == null || !o.getClass().equals(this.getClass())) return false;
- MetadataVersionChange other = (MetadataVersionChange) o;
- return oldVersion.equals(other.oldVersion) &&
- newVersion.equals(other.newVersion);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(oldVersion,
- newVersion);
- }
-
- @Override
- public String toString() {
- return "MetadataVersionChange(" +
- "oldVersion=" + oldVersion +
- ", newVersion=" + newVersion +
- ")";
- }
}
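The compact canonical constructor used above is the idiomatic place for argument validation in a record. A small self-contained sketch illustrates the pattern; the record name and the version strings are placeholders, not real MetadataVersion values.

import java.util.Objects;

public class RecordValidationDemo {
    // Hypothetical record validating its components in a compact canonical constructor.
    record VersionChange(String oldVersion, String newVersion) {
        VersionChange {
            Objects.requireNonNull(oldVersion, "oldVersion");
            Objects.requireNonNull(newVersion, "newVersion");
        }
    }

    public static void main(String[] args) {
        System.out.println(new VersionChange("3.8-IV0", "3.9-IV0"));
        try {
            new VersionChange(null, "3.9-IV0"); // rejected before the components are stored
        } catch (NullPointerException expected) {
            System.out.println("null component rejected: " + expected.getMessage());
        }
    }
}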
diff --git a/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java b/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java
index ea3f76fdfa8..d1a0a4a7d12 100644
--- a/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/ProducerIdsImage.java
@@ -21,36 +21,22 @@ import org.apache.kafka.common.metadata.ProducerIdsRecord;
import org.apache.kafka.image.node.ProducerIdsImageNode;
import org.apache.kafka.image.writer.ImageWriter;
-import java.util.Objects;
-
-
/**
* Stores the highest seen producer ID in the metadata image.
- *
+ *
* This class is thread-safe.
+ *
+ * @param nextProducerId The next producer ID, or -1 in the special case where no producer IDs have been issued.
*/
-public final class ProducerIdsImage {
+public record ProducerIdsImage(long nextProducerId) {
public static final ProducerIdsImage EMPTY = new ProducerIdsImage(-1L);
- /**
- * The next producer ID, or -1 in the special case where no producer IDs have been issued.
- */
- private final long nextProducerId;
-
- public ProducerIdsImage(long nextProducerId) {
- this.nextProducerId = nextProducerId;
- }
-
- public long nextProducerId() {
- return nextProducerId;
- }
-
public void write(ImageWriter writer) {
if (nextProducerId >= 0) {
writer.write(0, new ProducerIdsRecord().
- setBrokerId(-1).
- setBrokerEpoch(-1).
- setNextProducerId(nextProducerId));
+ setBrokerId(-1).
+ setBrokerEpoch(-1).
+ setNextProducerId(nextProducerId));
}
}
@@ -58,17 +44,6 @@ public final class ProducerIdsImage {
return nextProducerId == EMPTY.nextProducerId;
}
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof ProducerIdsImage other)) return false;
- return nextProducerId == other.nextProducerId;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(nextProducerId);
- }
-
@Override
public String toString() {
return new ProducerIdsImageNode(this).stringify();
diff --git a/metadata/src/main/java/org/apache/kafka/image/ScramImage.java b/metadata/src/main/java/org/apache/kafka/image/ScramImage.java
index c0c17a2a482..60c28d8f72e 100644
--- a/metadata/src/main/java/org/apache/kafka/image/ScramImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/ScramImage.java
@@ -39,29 +39,27 @@ import java.util.Map.Entry;
/**
* Represents the SCRAM credentials in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class ScramImage {
+public record ScramImage(Map> mechanisms) {
public static final ScramImage EMPTY = new ScramImage(Map.of());
- private final Map> mechanisms;
-
- public ScramImage(Map> mechanisms) {
- this.mechanisms = Collections.unmodifiableMap(mechanisms);
+ public ScramImage {
+ mechanisms = Collections.unmodifiableMap(mechanisms);
}
public void write(ImageWriter writer, ImageWriterOptions options) {
if (options.metadataVersion().isScramSupported()) {
- for (Entry> mechanismEntry : mechanisms.entrySet()) {
- for (Entry userEntry : mechanismEntry.getValue().entrySet()) {
+ for (var mechanismEntry : mechanisms.entrySet()) {
+ for (var userEntry : mechanismEntry.getValue().entrySet()) {
writer.write(0, userEntry.getValue().toRecord(userEntry.getKey(), mechanismEntry.getKey()));
}
}
} else {
boolean isEmpty = true;
StringBuilder scramImageString = new StringBuilder("ScramImage({");
- for (Entry> mechanismEntry : mechanisms.entrySet()) {
+ for (var mechanismEntry : mechanisms.entrySet()) {
if (!mechanismEntry.getValue().isEmpty()) {
scramImageString.append(mechanismEntry.getKey()).append(":");
List users = new ArrayList<>(mechanismEntry.getValue().keySet());
@@ -87,7 +85,7 @@ public final class ScramImage {
if ((users == null) || (users.isEmpty())) {
// If there are no users listed then get all the users
- for (Map scramCredentialDataSet : mechanisms.values()) {
+ for (var scramCredentialDataSet : mechanisms.values()) {
for (String user : scramCredentialDataSet.keySet()) {
uniqueUsers.put(user, false);
}
@@ -105,17 +103,17 @@ public final class ScramImage {
DescribeUserScramCredentialsResponseData retval = new DescribeUserScramCredentialsResponseData();
- for (Map.Entry user : uniqueUsers.entrySet()) {
+ for (Entry user : uniqueUsers.entrySet()) {
DescribeUserScramCredentialsResult result = new DescribeUserScramCredentialsResult().setUser(user.getKey());
if (!user.getValue()) {
boolean dataFound = false;
List credentialInfos = new ArrayList<>();
- for (Map.Entry> mechanismsEntry : mechanisms.entrySet()) {
+ for (var mechanismsEntry : mechanisms.entrySet()) {
Map credentialDataSet = mechanismsEntry.getValue();
if (credentialDataSet.containsKey(user.getKey())) {
credentialInfos.add(new CredentialInfo().setMechanism(mechanismsEntry.getKey().type())
- .setIterations(credentialDataSet.get(user.getKey()).iterations()));
+ .setIterations(credentialDataSet.get(user.getKey()).iterations()));
dataFound = true;
}
}
@@ -123,38 +121,21 @@ public final class ScramImage {
result.setCredentialInfos(credentialInfos);
} else {
result.setErrorCode(Errors.RESOURCE_NOT_FOUND.code())
- .setErrorMessage(DESCRIBE_USER_THAT_DOES_NOT_EXIST + user.getKey());
+ .setErrorMessage(DESCRIBE_USER_THAT_DOES_NOT_EXIST + user.getKey());
}
} else {
result.setErrorCode(Errors.DUPLICATE_RESOURCE.code())
- .setErrorMessage(DESCRIBE_DUPLICATE_USER + user.getKey());
+ .setErrorMessage(DESCRIBE_DUPLICATE_USER + user.getKey());
}
retval.results().add(result);
}
return retval;
}
- public Map> mechanisms() {
- return mechanisms;
- }
-
public boolean isEmpty() {
return mechanisms.isEmpty();
}
- @Override
- public int hashCode() {
- return mechanisms.hashCode();
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null) return false;
- if (!o.getClass().equals(ScramImage.class)) return false;
- ScramImage other = (ScramImage) o;
- return mechanisms.equals(other.mechanisms);
- }
-
@Override
public String toString() {
return new ScramImageNode(this).stringify();
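The compact constructor above normalizes the component before it is stored. A hypothetical record sketch makes the semantics explicit: Collections.unmodifiableMap returns a read-only view rather than a copy, so the accessor cannot be used to mutate the map, although the caller's original map still backs it; Map.copyOf would additionally detach the record from that map.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class CompactConstructorDemo {
    // Hypothetical record wrapping its component in an unmodifiable view.
    record Image(Map<String, Integer> entries) {
        Image {
            entries = Collections.unmodifiableMap(entries);
        }
    }

    public static void main(String[] args) {
        Map<String, Integer> source = new HashMap<>();
        source.put("a", 1);
        Image image = new Image(source);
        try {
            image.entries().put("b", 2); // rejected by the unmodifiable view
        } catch (UnsupportedOperationException expected) {
            System.out.println("accessor returns a read-only view");
        }
        System.out.println(image.entries()); // {a=1}
    }
}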
diff --git a/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java b/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java
index 21dba62576a..36e73921932 100644
--- a/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java
+++ b/metadata/src/main/java/org/apache/kafka/image/TopicsImage.java
@@ -26,26 +26,14 @@ import org.apache.kafka.server.immutable.ImmutableMap;
import org.apache.kafka.server.util.TranslatedValueMapView;
import java.util.Map;
-import java.util.Objects;
/**
* Represents the topics in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class TopicsImage {
- public static final TopicsImage EMPTY = new TopicsImage(ImmutableMap.empty(), ImmutableMap.empty());
-
- private final ImmutableMap topicsById;
- private final ImmutableMap topicsByName;
-
- public TopicsImage(
- ImmutableMap topicsById,
- ImmutableMap topicsByName
- ) {
- this.topicsById = topicsById;
- this.topicsByName = topicsByName;
- }
+public record TopicsImage(ImmutableMap topicsById, ImmutableMap topicsByName) {
+ public static final TopicsImage EMPTY = new TopicsImage(ImmutableMap.empty(), ImmutableMap.empty());
public TopicsImage including(TopicImage topic) {
return new TopicsImage(
@@ -57,14 +45,6 @@ public final class TopicsImage {
return topicsById.isEmpty() && topicsByName.isEmpty();
}
- public ImmutableMap topicsById() {
- return topicsById;
- }
-
- public ImmutableMap topicsByName() {
- return topicsByName;
- }
-
public PartitionRegistration getPartition(Uuid id, int partitionId) {
TopicImage topicImage = topicsById.get(id);
if (topicImage == null) return null;
@@ -85,21 +65,9 @@ public final class TopicsImage {
}
}
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof TopicsImage other)) return false;
- return topicsById.equals(other.topicsById) &&
- topicsByName.equals(other.topicsByName);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(topicsById, topicsByName);
- }
-
/**
* Expose a view of this TopicsImage as a map from topic names to IDs.
- *
+ *
* Like TopicsImage itself, this map is immutable.
*/
public Map topicNameToIdView() {
@@ -108,7 +76,7 @@ public final class TopicsImage {
/**
* Expose a view of this TopicsImage as a map from IDs to names.
- *
+ *
* Like TopicsImage itself, this map is immutable.
*/
public Map topicIdToNameView() {
diff --git a/metadata/src/main/java/org/apache/kafka/image/loader/SnapshotManifest.java b/metadata/src/main/java/org/apache/kafka/image/loader/SnapshotManifest.java
index 5653a4689ea..cbd5e660c19 100644
--- a/metadata/src/main/java/org/apache/kafka/image/loader/SnapshotManifest.java
+++ b/metadata/src/main/java/org/apache/kafka/image/loader/SnapshotManifest.java
@@ -19,65 +19,16 @@ package org.apache.kafka.image.loader;
import org.apache.kafka.image.MetadataProvenance;
-import java.util.Objects;
-
-
/**
* Contains information about a snapshot that was loaded.
+ *
+ * @param provenance The source of this snapshot.
+ * @param elapsedNs The time in nanoseconds that it took to load the snapshot.
*/
-public class SnapshotManifest implements LoaderManifest {
- /**
- * The source of this snapshot.
- */
- private final MetadataProvenance provenance;
-
- /**
- * The time in microseconds that it took to load the snapshot.
- */
- private final long elapsedNs;
-
- public SnapshotManifest(
- MetadataProvenance provenance,
- long elapsedNs
- ) {
- this.provenance = provenance;
- this.elapsedNs = elapsedNs;
- }
+public record SnapshotManifest(MetadataProvenance provenance, long elapsedNs) implements LoaderManifest {
@Override
public LoaderManifestType type() {
return LoaderManifestType.SNAPSHOT;
}
-
- @Override
- public MetadataProvenance provenance() {
- return provenance;
- }
-
- public long elapsedNs() {
- return elapsedNs;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(
- provenance,
- elapsedNs);
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null || !o.getClass().equals(this.getClass())) return false;
- SnapshotManifest other = (SnapshotManifest) o;
- return provenance.equals(other.provenance) &&
- elapsedNs == other.elapsedNs;
- }
-
- @Override
- public String toString() {
- return "SnapshotManifest(" +
- "provenance=" + provenance +
- ", elapsedNs=" + elapsedNs +
- ")";
- }
}
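A record accessor can satisfy an interface method when the name and return type line up, which is why no explicit provenance() override is needed after the conversion above. A minimal sketch with hypothetical names:

public class RecordInterfaceDemo {
    // Hypothetical interface and record; the generated elapsedNs() accessor
    // implements the interface method directly.
    interface Manifest {
        long elapsedNs();
    }

    record SnapshotLoad(long elapsedNs) implements Manifest { }

    public static void main(String[] args) {
        Manifest m = new SnapshotLoad(1_500_000L);
        System.out.println(m.elapsedNs()); // 1500000, served by the generated accessor
    }
}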
diff --git a/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java b/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java
index d9b0ae880f8..0eebac81521 100644
--- a/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java
+++ b/metadata/src/main/java/org/apache/kafka/image/node/MetadataImageNode.java
@@ -24,17 +24,15 @@ import java.util.Map;
import java.util.function.Function;
-public class MetadataImageNode implements MetadataNode {
+/**
+ * @param image The metadata image.
+ */
+public record MetadataImageNode(MetadataImage image) implements MetadataNode {
/**
* The name of this node.
*/
public static final String NAME = "image";
- /**
- * The metadata image.
- */
- private final MetadataImage image;
-
private static final Map> CHILDREN = Map.of(
ProvenanceNode.NAME, image -> new ProvenanceNode(image.provenance()),
FeaturesImageNode.NAME, image -> new FeaturesImageNode(image.features()),
@@ -48,14 +46,6 @@ public class MetadataImageNode implements MetadataNode {
DelegationTokenImageNode.NAME, image -> new DelegationTokenImageNode(image.delegationTokens())
);
- public MetadataImageNode(MetadataImage image) {
- this.image = image;
- }
-
- public MetadataImage image() {
- return image;
- }
-
@Override
public Collection childNames() {
return CHILDREN.keySet();
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java
index 9380bccbcfd..011c3c09952 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerHeartbeatReply.java
@@ -17,76 +17,13 @@
package org.apache.kafka.metadata;
-import java.util.Objects;
-
-public class BrokerHeartbeatReply {
- /**
- * True if the heartbeat reply should tell the broker that it has caught up.
- */
- private final boolean isCaughtUp;
-
- /**
- * True if the heartbeat reply should tell the broker that it is fenced.
- */
- private final boolean isFenced;
-
- /**
- * True if the broker is currently in a controlled shutdown state.
- */
- private final boolean inControlledShutdown;
-
- /**
- * True if the heartbeat reply should tell the broker that it should shut down.
- */
- private final boolean shouldShutDown;
-
- public BrokerHeartbeatReply(boolean isCaughtUp,
- boolean isFenced,
- boolean inControlledShutdown,
- boolean shouldShutDown) {
- this.isCaughtUp = isCaughtUp;
- this.isFenced = isFenced;
- this.inControlledShutdown = inControlledShutdown;
- this.shouldShutDown = shouldShutDown;
- }
-
- public boolean isCaughtUp() {
- return isCaughtUp;
- }
-
- public boolean isFenced() {
- return isFenced;
- }
-
- public boolean inControlledShutdown() {
- return inControlledShutdown;
- }
-
- public boolean shouldShutDown() {
- return shouldShutDown;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(isCaughtUp, isFenced, inControlledShutdown, shouldShutDown);
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof BrokerHeartbeatReply other)) return false;
- return other.isCaughtUp == isCaughtUp &&
- other.isFenced == isFenced &&
- other.inControlledShutdown == inControlledShutdown &&
- other.shouldShutDown == shouldShutDown;
- }
-
- @Override
- public String toString() {
- return "BrokerHeartbeatReply(isCaughtUp=" + isCaughtUp +
- ", isFenced=" + isFenced +
- ", inControlledShutdown=" + inControlledShutdown +
- ", shouldShutDown = " + shouldShutDown +
- ")";
- }
+/**
+ * @param isCaughtUp True if the heartbeat reply should tell the broker that it has caught up.
+ * @param isFenced True if the heartbeat reply should tell the broker that it is fenced.
+ * @param inControlledShutdown True if the broker is currently in a controlled shutdown state.
+ * @param shouldShutDown True if the heartbeat reply should tell the broker that it should shut down.
+ */
+public record BrokerHeartbeatReply(boolean isCaughtUp, boolean isFenced, boolean inControlledShutdown,
+ boolean shouldShutDown) {
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java
index cfd86e67ab5..977ec2f9863 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistrationReply.java
@@ -17,33 +17,6 @@
package org.apache.kafka.metadata;
-import java.util.Objects;
-
-public class BrokerRegistrationReply {
- private final long epoch;
-
- public BrokerRegistrationReply(long epoch) {
- this.epoch = epoch;
- }
-
- public long epoch() {
- return epoch;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(epoch);
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof BrokerRegistrationReply other)) return false;
- return other.epoch == epoch;
- }
-
- @Override
- public String toString() {
- return "BrokerRegistrationReply(epoch=" + epoch + ")";
- }
+public record BrokerRegistrationReply(long epoch) {
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/DelegationTokenData.java b/metadata/src/main/java/org/apache/kafka/metadata/DelegationTokenData.java
index df853169e19..9ed69f01081 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/DelegationTokenData.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/DelegationTokenData.java
@@ -24,16 +24,13 @@ import org.apache.kafka.common.utils.SecurityUtils;
import java.util.ArrayList;
import java.util.List;
-import java.util.Objects;
/**
* Represents the Delegation Tokens in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class DelegationTokenData {
-
- private final TokenInformation tokenInformation;
+public record DelegationTokenData(TokenInformation tokenInformation) {
public static DelegationTokenData fromRecord(DelegationTokenRecord record) {
List renewers = new ArrayList<>();
@@ -50,14 +47,6 @@ public final class DelegationTokenData {
record.expirationTimestamp()));
}
- public DelegationTokenData(TokenInformation tokenInformation) {
- this.tokenInformation = tokenInformation;
- }
-
- public TokenInformation tokenInformation() {
- return tokenInformation;
- }
-
public DelegationTokenRecord toRecord() {
return new DelegationTokenRecord()
.setOwner(tokenInformation.ownerAsString())
@@ -69,19 +58,6 @@ public final class DelegationTokenData {
.setTokenId(tokenInformation.tokenId());
}
- @Override
- public int hashCode() {
- return Objects.hash(tokenInformation);
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null) return false;
- if (!o.getClass().equals(DelegationTokenData.class)) return false;
- DelegationTokenData other = (DelegationTokenData) o;
- return tokenInformation.equals(other.tokenInformation);
- }
-
/*
* We explicitly hide tokenInformation when converting DelegationTokenData to string
* For legacy reasons, we did not change TokenInformation to hide sensitive data.
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java
index 0a5af620f2a..d66f6ebe02c 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/FinalizedControllerFeatures.java
@@ -19,18 +19,13 @@ package org.apache.kafka.metadata;
import java.util.Collections;
import java.util.Map;
-import java.util.Objects;
import java.util.Optional;
import java.util.Set;
-
/**
* A map of feature names to their supported versions.
*/
-public class FinalizedControllerFeatures {
- private final Map featureMap;
- private final long epoch;
-
+public record FinalizedControllerFeatures(Map featureMap, long epoch) {
public FinalizedControllerFeatures(Map featureMap, long epoch) {
this.featureMap = Collections.unmodifiableMap(featureMap);
this.epoch = epoch;
@@ -47,31 +42,4 @@ public class FinalizedControllerFeatures {
public Set featureNames() {
return featureMap.keySet();
}
-
- public Map featureMap() {
- return featureMap;
- }
-
- public long epoch() {
- return epoch;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(featureMap, epoch);
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof FinalizedControllerFeatures other)) return false;
- return featureMap.equals(other.featureMap) && epoch == other.epoch;
- }
-
- @Override
- public String toString() {
- return "FinalizedControllerFeatures(" +
- "featureMap=" + featureMap.toString() +
- ", epoch=" + epoch +
- ")";
- }
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/ScramCredentialData.java b/metadata/src/main/java/org/apache/kafka/metadata/ScramCredentialData.java
index 4339a572b71..ff35881baaa 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/ScramCredentialData.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/ScramCredentialData.java
@@ -26,51 +26,18 @@ import java.util.Objects;
/**
* Represents the SCRAM credentials in the metadata image.
- *
+ *
* This class is thread-safe.
*/
-public final class ScramCredentialData {
- private final byte[] salt;
- private final byte[] storedKey;
- private final byte[] serverKey;
- private final int iterations;
-
+public record ScramCredentialData(byte[] salt, byte[] storedKey, byte[] serverKey, int iterations) {
public static ScramCredentialData fromRecord(
UserScramCredentialRecord record
) {
return new ScramCredentialData(
- record.salt(),
- record.storedKey(),
- record.serverKey(),
- record.iterations());
- }
-
- public ScramCredentialData(
- byte[] salt,
- byte[] storedKey,
- byte[] serverKey,
- int iterations
- ) {
- this.salt = salt;
- this.storedKey = storedKey;
- this.serverKey = serverKey;
- this.iterations = iterations;
- }
-
- public byte[] salt() {
- return salt;
- }
-
- public byte[] storedKey() {
- return storedKey;
- }
-
- public byte[] serverKey() {
- return serverKey;
- }
-
- public int iterations() {
- return iterations;
+ record.salt(),
+ record.storedKey(),
+ record.serverKey(),
+ record.iterations());
}
public UserScramCredentialRecord toRecord(
@@ -78,12 +45,12 @@ public final class ScramCredentialData {
ScramMechanism mechanism
) {
return new UserScramCredentialRecord().
- setName(userName).
- setMechanism(mechanism.type()).
- setSalt(salt).
- setStoredKey(storedKey).
- setServerKey(serverKey).
- setIterations(iterations);
+ setName(userName).
+ setMechanism(mechanism.type()).
+ setSalt(salt).
+ setStoredKey(storedKey).
+ setServerKey(serverKey).
+ setIterations(iterations);
}
public ScramCredential toCredential() {
@@ -106,9 +73,9 @@ public final class ScramCredentialData {
if (!o.getClass().equals(ScramCredentialData.class)) return false;
ScramCredentialData other = (ScramCredentialData) o;
return Arrays.equals(salt, other.salt) &&
- Arrays.equals(storedKey, other.storedKey) &&
- Arrays.equals(serverKey, other.serverKey) &&
- iterations == other.iterations;
+ Arrays.equals(storedKey, other.storedKey) &&
+ Arrays.equals(serverKey, other.serverKey) &&
+ iterations == other.iterations;
}
@Override
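Unlike the other conversions in this patch, this record keeps hand-written equals and hashCode. The reason is that the members a record generates compare array components by reference, not by content. A short sketch with hypothetical record names demonstrates the difference:

import java.util.Arrays;

public class ArrayComponentDemo {
    // Generated equals/hashCode compare the byte[] component by reference.
    record Naive(byte[] salt) { }

    // Content-based comparison must be written by hand, as in the patch above.
    record Safe(byte[] salt) {
        @Override
        public boolean equals(Object o) {
            return o instanceof Safe other && Arrays.equals(salt, other.salt);
        }

        @Override
        public int hashCode() {
            return Arrays.hashCode(salt);
        }
    }

    public static void main(String[] args) {
        byte[] a = {1, 2, 3};
        byte[] b = {1, 2, 3};
        System.out.println(new Naive(a).equals(new Naive(b))); // false: reference comparison
        System.out.println(new Safe(a).equals(new Safe(b)));   // true: content comparison
    }
}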
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAcl.java b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAcl.java
index 8817b93bae4..21a142d3b80 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAcl.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAcl.java
@@ -27,13 +27,13 @@ import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
-import java.util.Objects;
-
/**
* A Kafka ACL which is identified by a UUID and stored in the metadata log.
*/
-public final class StandardAcl implements Comparable {
+public record StandardAcl(ResourceType resourceType, String resourceName, PatternType patternType, String principal,
+ String host, AclOperation operation,
+ AclPermissionType permissionType) implements Comparable {
public static StandardAcl fromRecord(AccessControlEntryRecord record) {
return new StandardAcl(
ResourceType.fromCode(record.resourceType()),
@@ -56,47 +56,6 @@ public final class StandardAcl implements Comparable {
acl.entry().permissionType());
}
- private final ResourceType resourceType;
- private final String resourceName;
- private final PatternType patternType;
- private final String principal;
- private final String host;
- private final AclOperation operation;
- private final AclPermissionType permissionType;
-
- public StandardAcl(
- ResourceType resourceType,
- String resourceName,
- PatternType patternType,
- String principal,
- String host,
- AclOperation operation,
- AclPermissionType permissionType) {
- this.resourceType = resourceType;
- this.resourceName = resourceName;
- this.patternType = patternType;
- this.principal = principal;
- this.host = host;
- this.operation = operation;
- this.permissionType = permissionType;
- }
-
- public ResourceType resourceType() {
- return resourceType;
- }
-
- public String resourceName() {
- return resourceName;
- }
-
- public PatternType patternType() {
- return patternType;
- }
-
- public String principal() {
- return principal;
- }
-
public KafkaPrincipal kafkaPrincipal() {
int colonIndex = principal.indexOf(":");
if (colonIndex == -1) {
@@ -108,18 +67,6 @@ public final class StandardAcl implements Comparable {
return new KafkaPrincipal(principalType, principalName);
}
- public String host() {
- return host;
- }
-
- public AclOperation operation() {
- return operation;
- }
-
- public AclPermissionType permissionType() {
- return permissionType;
- }
-
public AclBinding toBinding() {
ResourcePattern resourcePattern =
new ResourcePattern(resourceType, resourceName, patternType);
@@ -128,32 +75,6 @@ public final class StandardAcl implements Comparable {
return new AclBinding(resourcePattern, accessControlEntry);
}
- @Override
- public boolean equals(Object o) {
- if (o == null || !o.getClass().equals(StandardAcl.class)) return false;
- if (o == this) return true;
- StandardAcl other = (StandardAcl) o;
- return resourceType.equals(other.resourceType) &&
- resourceName.equals(other.resourceName) &&
- patternType.equals(other.patternType) &&
- principal.equals(other.principal) &&
- host.equals(other.host) &&
- operation.equals(other.operation) &&
- permissionType.equals(other.permissionType);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(
- resourceType,
- resourceName,
- patternType,
- principal,
- host,
- operation,
- permissionType);
- }
-
/**
* Compare two StandardAcl objects. See {@link StandardAuthorizerData#authorize} for an
* explanation of why we want this particular sort order.
@@ -176,17 +97,4 @@ public final class StandardAcl implements Comparable {
result = permissionType.compareTo(other.permissionType);
return result;
}
-
- @Override
- public String toString() {
- return "StandardAcl(" +
- "resourceType=" + resourceType +
- ", resourceName=" + resourceName +
- ", patternType=" + patternType +
- ", principal=" + principal +
- ", host=" + host +
- ", operation=" + operation +
- ", permissionType=" + permissionType +
- ")";
- }
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAclWithId.java b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAclWithId.java
index 822a7752e0e..83a9d3f2bc9 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAclWithId.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAclWithId.java
@@ -21,34 +21,14 @@ import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.metadata.AccessControlEntryRecord;
-import java.util.Objects;
-
-
/**
* A tuple of (id, acl)
*/
-public final class StandardAclWithId {
+public record StandardAclWithId(Uuid id, StandardAcl acl) {
public static StandardAclWithId fromRecord(AccessControlEntryRecord record) {
return new StandardAclWithId(record.id(), StandardAcl.fromRecord(record));
}
- private final Uuid id;
- private final StandardAcl acl;
-
- public StandardAclWithId(Uuid id,
- StandardAcl acl) {
- this.id = id;
- this.acl = acl;
- }
-
- public Uuid id() {
- return id;
- }
-
- public StandardAcl acl() {
- return acl;
- }
-
public AccessControlEntryRecord toRecord() {
return new AccessControlEntryRecord().
setId(id).
@@ -64,26 +44,4 @@ public final class StandardAclWithId {
public AclBinding toBinding() {
return acl.toBinding();
}
-
- @Override
- public boolean equals(Object o) {
- if (o == null || !o.getClass().equals(StandardAclWithId.class)) return false;
- if (o == this) return true;
- StandardAclWithId other = (StandardAclWithId) o;
- return id.equals(other.id) &&
- acl.equals(other.acl);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(id, acl);
- }
-
- @Override
- public String toString() {
- return "StandardAclWithId(" +
- "id=" + id +
- ", acl=" + acl +
- ")";
- }
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
index be50a0c895b..54bb6ea4784 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerData.java
@@ -550,17 +550,7 @@ public class StandardAuthorizerData {
}
}
- private static class DefaultRule implements MatchingRule {
- private final AuthorizationResult result;
-
- private DefaultRule(AuthorizationResult result) {
- this.result = result;
- }
-
- @Override
- public AuthorizationResult result() {
- return result;
- }
+ private record DefaultRule(AuthorizationResult result) implements MatchingRule {
@Override
public String toString() {
@@ -568,19 +558,7 @@ public class StandardAuthorizerData {
}
}
- private static class MatchingAclRule implements MatchingRule {
- private final StandardAcl acl;
- private final AuthorizationResult result;
-
- private MatchingAclRule(StandardAcl acl, AuthorizationResult result) {
- this.acl = acl;
- this.result = result;
- }
-
- @Override
- public AuthorizationResult result() {
- return result;
- }
+ private record MatchingAclRule(StandardAcl acl, AuthorizationResult result) implements MatchingRule {
@Override
public String toString() {
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/placement/PlacementSpec.java b/metadata/src/main/java/org/apache/kafka/metadata/placement/PlacementSpec.java
index 85daaf59e5d..c3247f1981c 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/placement/PlacementSpec.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/placement/PlacementSpec.java
@@ -19,65 +19,9 @@ package org.apache.kafka.metadata.placement;
import org.apache.kafka.common.annotation.InterfaceStability;
-import java.util.Objects;
-
-
/**
* Specifies a replica placement that we want to make.
*/
@InterfaceStability.Unstable
-public class PlacementSpec {
- private final int startPartition;
-
- private final int numPartitions;
-
- private final short numReplicas;
-
- public PlacementSpec(
- int startPartition,
- int numPartitions,
- short numReplicas
- ) {
- this.startPartition = startPartition;
- this.numPartitions = numPartitions;
- this.numReplicas = numReplicas;
- }
-
- public int startPartition() {
- return startPartition;
- }
-
- public int numPartitions() {
- return numPartitions;
- }
-
- public short numReplicas() {
- return numReplicas;
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null) return false;
- if (!(o.getClass().equals(this.getClass()))) return false;
- PlacementSpec other = (PlacementSpec) o;
- return startPartition == other.startPartition &&
- numPartitions == other.numPartitions &&
- numReplicas == other.numReplicas;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(startPartition,
- numPartitions,
- numReplicas);
- }
-
- @Override
- public String toString() {
- return "PlacementSpec" +
- "(startPartition=" + startPartition +
- ", numPartitions=" + numPartitions +
- ", numReplicas=" + numReplicas +
- ")";
- }
+public record PlacementSpec(int startPartition, int numPartitions, short numReplicas) {
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java b/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java
index 88bdc5df96c..e34ae025174 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/placement/TopicAssignment.java
@@ -18,42 +18,14 @@
package org.apache.kafka.metadata.placement;
import java.util.List;
-import java.util.Objects;
/**
* The topic assignment.
- *
+ *
* This class is immutable. Its internal state does not change.
*/
-public class TopicAssignment {
- private final List assignments;
-
- public TopicAssignment(List assignments) {
- this.assignments = List.copyOf(assignments);
- }
-
- /**
- * @return The replica assignment for each partition, where the index in the list corresponds to different partition.
- */
- public List assignments() {
- return assignments;
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof TopicAssignment other)) return false;
- return assignments.equals(other.assignments);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(assignments);
- }
-
- @Override
- public String toString() {
- return "TopicAssignment" +
- "(assignments=" + assignments +
- ")";
+public record TopicAssignment(List<PartitionAssignment> assignments) {
+ public TopicAssignment {
+ assignments = List.copyOf(assignments);
}
}
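TopicAssignment keeps its defensive copy by moving List.copyOf into a compact canonical constructor, so the record stays immutable even when the caller hands in a mutable list. A short sketch of the pattern, using a made-up record name:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical example of a compact canonical constructor: the parameter is
    // reassigned to an immutable copy before the implicit field assignment runs.
    public record Assignment(List<Integer> replicas) {
        public Assignment {
            replicas = List.copyOf(replicas);
        }

        public static void main(String[] args) {
            List<Integer> input = new ArrayList<>(List.of(1, 2, 3));
            Assignment a = new Assignment(input);
            input.add(4);                      // later mutation of the input...
            System.out.println(a.replicas());  // ...does not leak in: [1, 2, 3]
        }
    }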
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java b/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java
index 17f531d023f..2c24d937f9e 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/placement/UsableBroker.java
@@ -19,58 +19,11 @@ package org.apache.kafka.metadata.placement;
import org.apache.kafka.common.annotation.InterfaceStability;
-import java.util.Objects;
import java.util.Optional;
-
/**
* A broker where a replica can be placed.
*/
@InterfaceStability.Unstable
-public class UsableBroker {
- private final int id;
-
- private final Optional<String> rack;
-
- private final boolean fenced;
-
- public UsableBroker(int id, Optional<String> rack, boolean fenced) {
- this.id = id;
- this.rack = rack;
- this.fenced = fenced;
- }
-
- public int id() {
- return id;
- }
-
- public Optional<String> rack() {
- return rack;
- }
-
- public boolean fenced() {
- return fenced;
- }
-
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof UsableBroker other)) return false;
- return other.id == id && other.rack.equals(rack) && other.fenced == fenced;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(id,
- rack,
- fenced);
- }
-
- @Override
- public String toString() {
- return "UsableBroker" +
- "(id=" + id +
- ", rack=" + rack +
- ", fenced=" + fenced +
- ")";
- }
+public record UsableBroker(int id, Optional<String> rack, boolean fenced) {
}
diff --git a/metadata/src/main/java/org/apache/kafka/metadata/util/BatchFileReader.java b/metadata/src/main/java/org/apache/kafka/metadata/util/BatchFileReader.java
index 11f6403a850..7ac61b9e502 100644
--- a/metadata/src/main/java/org/apache/kafka/metadata/util/BatchFileReader.java
+++ b/metadata/src/main/java/org/apache/kafka/metadata/util/BatchFileReader.java
@@ -69,22 +69,7 @@ public final class BatchFileReader implements Iterator<BatchFileReader.BatchAndType>, AutoCloseable {
- public static class BatchAndType {
- private final Batch<ApiMessageAndVersion> batch;
- private final boolean isControl;
-
- public BatchAndType(Batch<ApiMessageAndVersion> batch, boolean isControl) {
- this.batch = batch;
- this.isControl = isControl;
- }
-
- public Batch<ApiMessageAndVersion> batch() {
- return batch;
- }
-
- public boolean isControl() {
- return isControl;
- }
+ public record BatchAndType(Batch<ApiMessageAndVersion> batch, boolean isControl) {
}
private final FileRecords fileRecords;
diff --git a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java
index 1a9cd50bd1d..33c540b9d14 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/PartitionReassignmentReplicasTest.java
@@ -113,8 +113,8 @@ public class PartitionReassignmentReplicasTest {
replicas.maybeCompleteReassignment(List.of(0, 1, 2, 3, 4, 5));
assertTrue(reassignmentOptional.isPresent());
PartitionReassignmentReplicas.CompletedReassignment completedReassignment = reassignmentOptional.get();
- assertEquals(List.of(3, 4, 5), completedReassignment.isr);
- assertEquals(List.of(3, 4, 5), completedReassignment.replicas);
+ assertEquals(List.of(3, 4, 5), completedReassignment.isr());
+ assertEquals(List.of(3, 4, 5), completedReassignment.replicas());
}
@Test
@@ -126,8 +126,8 @@ public class PartitionReassignmentReplicasTest {
replicas.maybeCompleteReassignment(List.of(0, 1, 2, 3));
assertTrue(reassignmentOptional.isPresent());
PartitionReassignmentReplicas.CompletedReassignment completedReassignment = reassignmentOptional.get();
- assertEquals(List.of(0, 1, 3), completedReassignment.isr);
- assertEquals(List.of(0, 1, 3), completedReassignment.replicas);
+ assertEquals(List.of(0, 1, 3), completedReassignment.isr());
+ assertEquals(List.of(0, 1, 3), completedReassignment.replicas());
}
@Test
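The test changes above are the call-site consequence of the refactor: what used to be public fields (completedReassignment.isr, completedReassignment.replicas) is now read through accessor methods, as a record generates. A hedged, stand-alone illustration with invented names:

    import java.util.List;

    // Hypothetical stand-in for CompletedReassignment after the conversion:
    // callers switch from field access to the generated accessors.
    public record Completed(List<Integer> isr, List<Integer> replicas) {
        public static void main(String[] args) {
            Completed completed = new Completed(List.of(3, 4, 5), List.of(3, 4, 5));
            // completed.isr no longer compiles as a field reference; use the accessor:
            System.out.println(completed.isr());      // [3, 4, 5]
            System.out.println(completed.replicas()); // [3, 4, 5]
        }
    }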
diff --git a/metadata/src/test/java/org/apache/kafka/controller/PeriodicTaskControlManagerTest.java b/metadata/src/test/java/org/apache/kafka/controller/PeriodicTaskControlManagerTest.java
index aebdb596c6c..2d62fdd02b1 100644
--- a/metadata/src/test/java/org/apache/kafka/controller/PeriodicTaskControlManagerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/controller/PeriodicTaskControlManagerTest.java
@@ -64,20 +64,7 @@ public class PeriodicTaskControlManagerTest {
}
}
- static class TrackedTask {
- final String tag;
- final long deadlineNs;
- final Supplier<ControllerResult<Void>> op;
-
- TrackedTask(
- String tag,
- long deadlineNs,
- Supplier<ControllerResult<Void>> op
- ) {
- this.tag = tag;
- this.deadlineNs = deadlineNs;
- this.op = op;
- }
+ record TrackedTask(String tag, long deadlineNs, Supplier<ControllerResult<Void>> op) {
}
static class PeriodicTaskControlManagerTestEnv implements PeriodicTaskControlManager.QueueAccessor {
diff --git a/metadata/src/test/java/org/apache/kafka/image/MetadataVersionChangeTest.java b/metadata/src/test/java/org/apache/kafka/image/MetadataVersionChangeTest.java
index 323cc5abf9a..4cfd4334694 100644
--- a/metadata/src/test/java/org/apache/kafka/image/MetadataVersionChangeTest.java
+++ b/metadata/src/test/java/org/apache/kafka/image/MetadataVersionChangeTest.java
@@ -24,6 +24,7 @@ import org.junit.jupiter.api.Timeout;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -57,4 +58,22 @@ public class MetadataVersionChangeTest {
"is changing from " + MetadataVersion.latestProduction() + " to " + MetadataVersion.MINIMUM_VERSION,
new MetadataVersionChangeException(CHANGE_LATEST_TO_MINIMUM).toString());
}
+
+ @Test
+ public void testConstructorThrowsExceptionWhenOldVersionIsNull() {
+ assertThrows(NullPointerException.class, () ->
+ new MetadataVersionChange(null, MetadataVersion.MINIMUM_VERSION));
+ }
+
+ @Test
+ public void testConstructorThrowsExceptionWhenNewVersionIsNull() {
+ assertThrows(NullPointerException.class, () ->
+ new MetadataVersionChange(MetadataVersion.MINIMUM_VERSION, null));
+ }
+
+ @Test
+ public void testConstructorThrowsExceptionWhenBothVersionsAreNull() {
+ assertThrows(NullPointerException.class, () ->
+ new MetadataVersionChange(null, null));
+ }
}
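The three new tests expect a NullPointerException from the constructor, which is the behaviour you get when a record's compact constructor (or an explicit canonical one) applies Objects.requireNonNull to each component. A sketch under that assumption, with invented names, since the actual MetadataVersionChange implementation is not shown here:

    import java.util.Objects;

    // Hypothetical record whose compact constructor rejects null components,
    // matching what assertThrows(NullPointerException.class, ...) exercises.
    public record VersionChange(String oldVersion, String newVersion) {
        public VersionChange {
            Objects.requireNonNull(oldVersion, "oldVersion");
            Objects.requireNonNull(newVersion, "newVersion");
        }

        public static void main(String[] args) {
            try {
                new VersionChange(null, "4.1-IV0");
            } catch (NullPointerException e) {
                System.out.println("rejected: " + e.getMessage()); // rejected: oldVersion
            }
        }
    }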
diff --git a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java
index c022ae79893..bd15406cbb0 100644
--- a/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java
+++ b/metadata/src/test/java/org/apache/kafka/metadata/RecordTestUtils.java
@@ -106,22 +106,7 @@ public class RecordTestUtils {
}
}
- public static class ImageDeltaPair<I, D> {
- private final Supplier<I> imageSupplier;
- private final Function<I, D> deltaCreator;
-
- public ImageDeltaPair(Supplier<I> imageSupplier, Function<I, D> deltaCreator) {
- this.imageSupplier = imageSupplier;
- this.deltaCreator = deltaCreator;
- }
-
- public Supplier<I> imageSupplier() {
- return imageSupplier;
- }
-
- public Function<I, D> deltaCreator() {
- return deltaCreator;
- }
+ public record ImageDeltaPair<I, D>(Supplier<I> imageSupplier, Function<I, D> deltaCreator) {
}
public static class TestThroughAllIntermediateImagesLeadingToFinalImageHelper {
diff --git a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
index b2f9fa68b3a..74d5a49d7ea 100644
--- a/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
+++ b/metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerTest.java
@@ -569,9 +569,9 @@ public class StandardAuthorizerTest {
String expectedAuditLog = "Principal = User:bob is Denied operation = READ " +
"from host = 127.0.0.1 on resource = Topic:LITERAL:alpha for request = Fetch " +
- "with resourceRefCount = 1 based on rule MatchingAcl(acl=StandardAcl(resourceType=TOPIC, " +
+ "with resourceRefCount = 1 based on rule MatchingAcl(acl=StandardAcl[resourceType=TOPIC, " +
"resourceName=alp, patternType=PREFIXED, principal=User:bob, host=*, operation=READ, " +
- "permissionType=DENY))";
+ "permissionType=DENY])";
if (logIfDenied) {
Mockito.verify(auditLog).info(expectedAuditLog);
@@ -611,9 +611,9 @@ public class StandardAuthorizerTest {
String expectedAuditLog = "Principal = User:bob is Allowed operation = READ " +
"from host = 127.0.0.1 on resource = Topic:LITERAL:green1 for request = Fetch " +
- "with resourceRefCount = 1 based on rule MatchingAcl(acl=StandardAcl(resourceType=TOPIC, " +
+ "with resourceRefCount = 1 based on rule MatchingAcl(acl=StandardAcl[resourceType=TOPIC, " +
"resourceName=green, patternType=PREFIXED, principal=User:bob, host=*, operation=READ, " +
- "permissionType=ALLOW))";
+ "permissionType=ALLOW])";
if (logIfAllowed) {
Mockito.verify(auditLog).debug(expectedAuditLog);
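The expected audit-log strings switch from StandardAcl(...) to StandardAcl[...] because a record's generated toString uses the ClassName[component=value, ...] form, unlike the parenthesised format the old hand-written toString produced. A quick illustration with a made-up record:

    // Hypothetical record showing the bracketed toString format that the
    // updated test expectations now match.
    public record Acl(String resourceName, String principal) {
        public static void main(String[] args) {
            System.out.println(new Acl("alpha", "User:bob"));
            // prints: Acl[resourceName=alpha, principal=User:bob]
        }
    }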
diff --git a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java
index 3551e2ce7b0..81625151dba 100644
--- a/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java
+++ b/metadata/src/test/java/org/apache/kafka/metadata/placement/TopicAssignmentTest.java
@@ -85,7 +85,7 @@ public class TopicAssignmentTest {
new PartitionAssignment(replicas, directories::get)
);
TopicAssignment topicAssignment = new TopicAssignment(partitionAssignments);
- assertEquals("TopicAssignment(assignments=[PartitionAssignment(replicas=[0, 1, 2], " +
- "directories=[v56qeYzNRrqNtXsxzcReog, MvUIAsOiRlSePeiBHdZrSQ, jUqCchHtTHqMxeVv4dw1RA])])", topicAssignment.toString());
+ assertEquals("TopicAssignment[assignments=[PartitionAssignment(replicas=[0, 1, 2], " +
+ "directories=[v56qeYzNRrqNtXsxzcReog, MvUIAsOiRlSePeiBHdZrSQ, jUqCchHtTHqMxeVv4dw1RA])]]", topicAssignment.toString());
}
}
diff --git a/release/README.md b/release/README.md
index 1f56f679011..bde487fff35 100644
--- a/release/README.md
+++ b/release/README.md
@@ -25,7 +25,7 @@ pip install -r requirements.txt
# Usage
-To start a release, first activate the virutalenv, and then run
+To start a release, first activate the virtualenv, and then run
the release script.
```
diff --git a/release/git.py b/release/git.py
index 9cb106df2fd..02d7dc8cbe2 100644
--- a/release/git.py
+++ b/release/git.py
@@ -136,4 +136,3 @@ def push_ref(ref, remote=push_remote_name, **kwargs):
def merge_ref(ref, **kwargs):
__defaults(kwargs)
cmd(f"Merging ref {ref}", f"git merge {ref}")
-
diff --git a/release/gpg.py b/release/gpg.py
index 6b396119265..347389e3089 100644
--- a/release/gpg.py
+++ b/release/gpg.py
@@ -32,7 +32,7 @@ def key_exists(key_id):
"""
try:
execute(f"gpg --list-keys {key_id}")
- except Exception as e:
+ except Exception:
return False
return True
@@ -70,13 +70,13 @@ def valid_passphrase(key_id, passphrase):
with tempfile.TemporaryDirectory() as tmpdir:
content = __file__
signature = tmpdir + '/sig.asc'
- # if the agent is running, the suplied passphrase may be ignored
+ # if the agent is running, the supplied passphrase may be ignored
agent_kill()
try:
sign(key_id, passphrase, content, signature)
verify(content, signature)
- except subprocess.CalledProcessError as e:
- False
+ except subprocess.CalledProcessError:
+ return False
return True
@@ -88,5 +88,3 @@ def key_pass_id(key_id, passphrase):
h.update(key_id.encode())
h.update(passphrase.encode())
return h.hexdigest()
-
-
diff --git a/release/notes.py b/release/notes.py
index 529c27b3728..e561fa03a20 100644
--- a/release/notes.py
+++ b/release/notes.py
@@ -41,13 +41,13 @@ def query(query, **kwargs):
Any additional keyword arguments are forwarded to jira.search_issues.
"""
results = []
- startAt = 0
+ start_at = 0
new_results = None
jira = JIRA(JIRA_BASE_URL)
while new_results is None or len(new_results) == MAX_RESULTS:
- new_results = jira.search_issues(query, startAt=startAt, maxResults=MAX_RESULTS, **kwargs)
+ new_results = jira.search_issues(query, startAt=start_at, maxResults=MAX_RESULTS, **kwargs)
results += new_results
- startAt += len(new_results)
+ start_at += len(new_results)
return results
@@ -172,5 +172,3 @@ if __name__ == "__main__":
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
-
-
diff --git a/release/preferences.py b/release/preferences.py
index 9b57554585a..f32f1650eed 100644
--- a/release/preferences.py
+++ b/release/preferences.py
@@ -89,5 +89,3 @@ def as_json():
Export all saved preferences in JSON format.
"""
json.dumps(prefs, indent=2)
-
-
diff --git a/release/release.py b/release/release.py
index d0cba6f1782..92b76dee1e3 100644
--- a/release/release.py
+++ b/release/release.py
@@ -218,7 +218,7 @@ def verify_gpg_key():
if not gpg.key_exists(gpg_key_id):
fail(f"GPG key {gpg_key_id} not found")
if not gpg.valid_passphrase(gpg_key_id, gpg_passphrase):
- fail(f"GPG passprase not valid for key {gpg_key_id}")
+ fail(f"GPG passphrase not valid for key {gpg_key_id}")
preferences.once("verify_requirements", lambda: confirm_or_fail(templates.requirements_instructions(preferences.FILE, preferences.as_json())))
@@ -232,12 +232,12 @@ apache_id = preferences.get('apache_id', lambda: prompt("Please enter your apach
jdk21_env = get_jdk(21)
-def verify_prerequeisites():
+def verify_prerequisites():
print("Begin to check if you have met all the pre-requisites for the release process")
def prereq(name, soft_check):
try:
result = soft_check()
- if result == False:
+ if not result:
fail(f"Pre-requisite not met: {name}")
else:
print(f"Pre-requisite met: {name}")
@@ -250,7 +250,7 @@ def verify_prerequeisites():
return True
-preferences.once(f"verify_prerequeisites", verify_prerequeisites)
+preferences.once(f"verify_prerequisites", verify_prerequisites)
# Validate that the release doesn't already exist
git.fetch_tags()
@@ -360,7 +360,7 @@ cmd("Building and uploading archives", "mvn deploy -Pgpg-signing", cwd=os.path.j
# TODO: Many of these suggested validation steps could be automated
# and would help pre-validate a lot of the stuff voters test
-print(templates.sanity_check_instructions(release_version, rc_tag, apache_id))
+print(templates.sanity_check_instructions(release_version, rc_tag))
confirm_or_fail("Have you sufficiently verified the release artifacts?")
# TODO: Can we close the staging repository via a REST API since we
@@ -376,6 +376,5 @@ git.reset_hard_head()
git.switch_branch(starting_branch)
git.delete_branch(release_version)
-rc_vote_email_text = templates.rc_vote_email_text(release_version, rc, rc_tag, dev_branch, docs_release_version, apache_id)
+rc_vote_email_text = templates.rc_vote_email_text(release_version, rc, rc_tag, dev_branch, docs_release_version)
print(templates.rc_email_instructions(rc_vote_email_text))
-
diff --git a/release/runtime.py b/release/runtime.py
index c0d5d67eafb..28765f0d92b 100644
--- a/release/runtime.py
+++ b/release/runtime.py
@@ -108,7 +108,7 @@ def _prefix(prefix_str, value_str):
def cmd(action, cmd_arg, *args, **kwargs):
"""
- Execute an external command. This should be preferered over execute()
+ Execute an external command. This should be preferred over execute()
when returning the output is not necessary, as the user will be given
the option of retrying in case of a failure.
"""
@@ -144,5 +144,3 @@ def cmd(action, cmd_arg, *args, **kwargs):
print(templates.cmd_failed())
fail("")
-
-
diff --git a/release/svn.py b/release/svn.py
index 4869f79ddf1..41c2fc6d28a 100644
--- a/release/svn.py
+++ b/release/svn.py
@@ -27,7 +27,7 @@ from runtime import cmd
SVN_DEV_URL="https://dist.apache.org/repos/dist/dev/kafka"
-def delete_old_rc_directory_if_needed(rc_tag, src, work_dir):
+def delete_old_rc_directory_if_needed(rc_tag, work_dir):
svn_dev = os.path.join(work_dir, "svn_dev")
cmd_desc = f"Check if {rc_tag} exists in the subversion repository."
cmd_str = f"svn info --show-item revision {SVN_DEV_URL}/{rc_tag}"
@@ -39,7 +39,7 @@ def delete_old_rc_directory_if_needed(rc_tag, src, work_dir):
cmd(cmd_desc, cmd_str, cwd = svn_dev)
def commit_artifacts(rc_tag, src, work_dir):
- delete_old_rc_directory_if_needed(rc_tag, src, work_dir)
+ delete_old_rc_directory_if_needed(rc_tag, work_dir)
svn_dev = os.path.join(work_dir, "svn_dev")
dst = os.path.join(svn_dev, rc_tag)
print(f"Copying {src} to {dst}")
diff --git a/release/templates.py b/release/templates.py
index e067f1eb475..2f9f07b0e3c 100644
--- a/release/templates.py
+++ b/release/templates.py
@@ -154,11 +154,11 @@ Go to https://repository.apache.org/#stagingRepositories and hit 'Close' for the
There will be more than one repository entries created, please close all of them.
In some cases, you may get errors on some repositories while closing them, see KAFKA-15033.
If this is not the first RC, you need to 'Drop' the previous artifacts.
-Confirm the correct artifacts are visible at https://repository.apache.org/content/groups/staging/org/apache/kafka/
+Confirm the correct artifacts are visible at https://repository.apache.org/content/groups/staging/org/apache/kafka/ and build the
+jvm and native Docker images following these instructions: https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=34840886#ReleaseProcess-CreateJVMApacheKafkaDockerArtifacts(Forversions>=3.7.0)
"""
-
-def sanity_check_instructions(release_version, rc_tag, apache_id):
+def sanity_check_instructions(release_version, rc_tag):
return f"""
*******************************************************************************************************************************************************
Ok. We've built and staged everything for the {rc_tag}.
@@ -189,14 +189,14 @@ Some suggested steps:
"""
-def rc_vote_email_text(release_version, rc, rc_tag, dev_branch, docs_version, apache_id):
+def rc_vote_email_text(release_version, rc, rc_tag, dev_branch, docs_version):
return f"""
To: dev@kafka.apache.org, users@kafka.apache.org, kafka-clients@googlegroups.com
Subject: [VOTE] {release_version} RC{rc}
Hello Kafka users, developers and client-developers,
-This is the first candidate for release of Apache Kafka {release_version}.
+This is the candidate for release of Apache Kafka {release_version}.
@@ -221,7 +221,7 @@ apache/kafka-native:{rc_tag}
https://repository.apache.org/content/groups/staging/org/apache/kafka/
* Javadoc:
-https://dist.apache.org/repos/dist/dev/kafka/{rc_tag}/javadoc/
+https://dist.apache.org/repos/dist/dev/kafka/{rc_tag}/javadoc/index.html
* Tag to be voted upon (off {dev_branch} branch) is the {release_version} tag:
https://github.com/apache/kafka/releases/tag/{rc_tag}
@@ -233,17 +233,16 @@ https://kafka.apache.org/{docs_version}/documentation.html
https://kafka.apache.org/{docs_version}/protocol.html
* Successful CI builds for the {dev_branch} branch:
-Unit/integration tests: https://ci-builds.apache.org/job/Kafka/job/kafka/job/{dev_branch}//
--- Confluent engineers can access the semphore build to provide the build number
-System tests: https://confluent-open-source-kafka-system-test-results.s3-us-west-2.amazonaws.com/{dev_branch}//report.html
+Unit/integration tests: https://github.com/apache/kafka/actions/runs/
+System tests:
+/report.html>