KAFKA-17367: Share coordinator infra classes [1/N] (#16921)

Introduce ShareCoordinator interface and related classes.

Reviewers: Andrew Schofield <aschofield@confluent.io>, Apoorv Mittal <apoorvmittal10@gmail.com>, David Arthur <mumrah@gmail.com>, Chia-Ping Tsai <chia7712@gmail.com>
Sushant Mahajan 2024-08-26 19:39:47 +05:30 committed by GitHub
parent 31f408d6da
commit 1621f88f06
15 changed files with 1114 additions and 154 deletions

View File

@@ -1596,6 +1596,17 @@ project(':share-coordinator') {
implementation project(':clients')
implementation project(':coordinator-common')
implementation project(':metadata')
implementation project(':server-common')
testImplementation project(':clients').sourceSets.test.output
testImplementation project(':server-common').sourceSets.test.output
testImplementation project(':coordinator-common').sourceSets.test.output
testImplementation libs.junitJupiter
testImplementation libs.mockitoCore
testRuntimeOnly libs.slf4jReload4j
testRuntimeOnly libs.junitPlatformLanucher
generator project(':generator')
}

View File

@@ -24,24 +24,33 @@
<!-- common library dependencies -->
<allow pkg="java" />
<allow pkg="org.junit.jupiter.api" />
<!-- no one depends on the server -->
<disallow pkg="kafka" />
<!-- anyone can use public classes -->
<allow pkg="org.apache.kafka.common" exact-match="true" />
<allow pkg="org.apache.kafka.common.errors" exact-match="true" />
<allow pkg="org.apache.kafka.common.memory" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.security" />
<allow pkg="org.apache.kafka.common.serialization" />
<allow pkg="org.apache.kafka.common.utils" />
<subpackage name="coordinator">
<subpackage name="share">
<allow pkg="org.apache.kafka.common.annotation" />
<allow pkg="org.apache.kafka.common.config" />
<allow pkg="org.apache.kafka.common.message" />
<allow pkg="org.apache.kafka.common.requests" />
<allow pkg="org.apache.kafka.coordinator.common" />
<allow pkg="org.apache.kafka.coordinator.share.generated" />
<allow pkg="org.apache.kafka.image" />
<allow pkg="org.apache.kafka.server.common" />
<allow pkg="org.apache.kafka.server.group.share" />
<subpackage name="generated">
<allow class="org.apache.kafka.common.Uuid" />
<allow pkg="com.fasterxml.jackson" />
<allow pkg="org.apache.kafka.common.errors" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.utils" />
<allow pkg="org.apache.kafka.coordinator.share.generated" />
</subpackage>
</subpackage>
</subpackage>

View File

@@ -323,7 +323,7 @@
<!-- group coordinator -->
<suppress checks="CyclomaticComplexity"
files="(ConsumerGroupMember|GroupMetadataManager|GeneralUniformAssignmentBuilder).java"/>
files="(ConsumerGroupMember|GroupMetadataManager|GeneralUniformAssignmentBuilder|GroupCoordinatorRecordSerde).java"/>
<suppress checks="(NPathComplexity|MethodLength)"
files="(GroupMetadataManager|ConsumerGroupTest|ShareGroupTest|GroupMetadataManagerTest|GroupMetadataManagerTestContext|GeneralUniformAssignmentBuilder).java"/>
<suppress checks="ClassFanOutComplexity"
@@ -331,7 +331,7 @@
<suppress checks="ParameterNumber"
files="(ConsumerGroupMember|GroupMetadataManager|GroupCoordinatorConfig).java"/>
<suppress checks="ClassDataAbstractionCouplingCheck"
files="(RecordHelpersTest|GroupCoordinatorRecordHelpers|GroupMetadataManager|GroupMetadataManagerTest|OffsetMetadataManagerTest|GroupCoordinatorServiceTest|GroupCoordinatorShardTest).java"/>
files="(RecordHelpersTest|GroupCoordinatorRecordHelpers|GroupMetadataManager|GroupMetadataManagerTest|OffsetMetadataManagerTest|GroupCoordinatorServiceTest|GroupCoordinatorShardTest|GroupCoordinatorRecordSerde).java"/>
<suppress checks="JavaNCSS"
files="(GroupMetadataManager|GroupMetadataManagerTest).java"/>

View File

@@ -0,0 +1,122 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.common.runtime;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
/**
* Serializer/Deserializer for {@link CoordinatorRecord}. The format is defined below:
* <pre>
* record_key = [record_type key_message]
* record_value = [value_version value_message]
*
* record_type : The record type is currently defined as the version of the key
* {@link ApiMessageAndVersion} object.
* key_message : The serialized message of the key {@link ApiMessageAndVersion} object.
* value_version : The value version is currently defined as the version of the value
* {@link ApiMessageAndVersion} object.
* value_message : The serialized message of the value {@link ApiMessageAndVersion} object.
* </pre>
*/
public abstract class CoordinatorRecordSerde implements Serializer<CoordinatorRecord>, Deserializer<CoordinatorRecord> {
@Override
public byte[] serializeKey(CoordinatorRecord record) {
// Record does not accept a null key.
return MessageUtil.toVersionPrefixedBytes(
record.key().version(),
record.key().message()
);
}
@Override
public byte[] serializeValue(CoordinatorRecord record) {
// Tombstone is represented with a null value.
if (record.value() == null) {
return null;
} else {
return MessageUtil.toVersionPrefixedBytes(
record.value().version(),
record.value().message()
);
}
}
@Override
public CoordinatorRecord deserialize(
ByteBuffer keyBuffer,
ByteBuffer valueBuffer
) throws RuntimeException {
final short recordType = readVersion(keyBuffer, "key");
final ApiMessage keyMessage = apiMessageKeyFor(recordType);
readMessage(keyMessage, keyBuffer, recordType, "key");
if (valueBuffer == null) {
return new CoordinatorRecord(new ApiMessageAndVersion(keyMessage, recordType), null);
}
final ApiMessage valueMessage = apiMessageValueFor(recordType);
final short valueVersion = readVersion(valueBuffer, "value");
readMessage(valueMessage, valueBuffer, valueVersion, "value");
return new CoordinatorRecord(
new ApiMessageAndVersion(keyMessage, recordType),
new ApiMessageAndVersion(valueMessage, valueVersion)
);
}
private short readVersion(ByteBuffer buffer, String name) throws RuntimeException {
try {
return buffer.getShort();
} catch (BufferUnderflowException ex) {
throw new RuntimeException(String.format("Could not read version from %s's buffer.", name));
}
}
private void readMessage(ApiMessage message, ByteBuffer buffer, short version, String name) throws RuntimeException {
try {
message.read(new ByteBufferAccessor(buffer), version);
} catch (RuntimeException ex) {
throw new RuntimeException(String.format("Could not read record with version %d from %s's buffer due to: %s.",
version, name, ex.getMessage()), ex);
}
}
/**
* Concrete subclasses must provide an implementation that returns the
* appropriate {@link ApiMessage} object representing the key.
*
* @param recordVersion - short representing the record type
* @return ApiMessage object
*/
protected abstract ApiMessage apiMessageKeyFor(short recordVersion);
/**
* Concrete subclasses must provide an implementation that returns the
* appropriate {@link ApiMessage} object representing the value.
*
* @param recordVersion - short representing the record type
* @return ApiMessage object
*/
protected abstract ApiMessage apiMessageValueFor(short recordVersion);
}
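To illustrate the format in the class javadoc, here is a minimal sketch (not part of this diff) that round-trips a key through the version-prefixed layout; it assumes the ShareSnapshotKey generated class introduced by this commit:

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey;

import java.nio.ByteBuffer;

public class VersionPrefixedFormatSketch {
    public static void main(String[] args) {
        ShareSnapshotKey key = new ShareSnapshotKey()
            .setGroupId("group")
            .setTopicId(Uuid.randomUuid())
            .setPartition(0);
        // serializeKey() delegates to MessageUtil: two bytes of version, then the message.
        byte[] bytes = MessageUtil.toVersionPrefixedBytes((short) 0, key);
        ByteBuffer buf = ByteBuffer.wrap(bytes);
        short recordType = buf.getShort();                    // record_type
        ShareSnapshotKey parsed = new ShareSnapshotKey();
        parsed.read(new ByteBufferAccessor(buf), recordType); // key_message
        System.out.println(recordType + " -> " + parsed);
    }
}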

View File

@@ -17,12 +17,8 @@
package org.apache.kafka.coordinator.group;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord;
import org.apache.kafka.coordinator.common.runtime.Deserializer;
import org.apache.kafka.coordinator.common.runtime.Serializer;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRecordSerde;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentKey;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupCurrentMemberAssignmentValue;
import org.apache.kafka.coordinator.group.generated.ConsumerGroupMemberMetadataKey;
@@ -53,91 +49,11 @@ import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMe
import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMemberValue;
import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataKey;
import org.apache.kafka.coordinator.group.generated.ShareGroupTargetAssignmentMetadataValue;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
/**
* Serializer/Deserializer for {@link CoordinatorRecord}. The format is defined below:
* <pre>
* record_key = [record_type key_message]
* record_value = [value_version value_message]
*
* record_type : The record type is currently define as the version of the key
* {@link ApiMessageAndVersion} object.
* key_message : The serialized message of the key {@link ApiMessageAndVersion} object.
* value_version : The value version is currently define as the version of the value
* {@link ApiMessageAndVersion} object.
* value_message : The serialized message of the value {@link ApiMessageAndVersion} object.
* </pre>
*/
@SuppressWarnings({ "ClassDataAbstractionCoupling", "CyclomaticComplexity" })
public class GroupCoordinatorRecordSerde implements Serializer<CoordinatorRecord>, Deserializer<CoordinatorRecord> {
public class GroupCoordinatorRecordSerde extends CoordinatorRecordSerde {
@Override
public byte[] serializeKey(CoordinatorRecord record) {
// Record does not accept a null key.
return MessageUtil.toVersionPrefixedBytes(
record.key().version(),
record.key().message()
);
}
@Override
public byte[] serializeValue(CoordinatorRecord record) {
// Tombstone is represented with a null value.
if (record.value() == null) {
return null;
} else {
return MessageUtil.toVersionPrefixedBytes(
record.value().version(),
record.value().message()
);
}
}
@Override
public CoordinatorRecord deserialize(
ByteBuffer keyBuffer,
ByteBuffer valueBuffer
) throws RuntimeException {
final short recordType = readVersion(keyBuffer, "key");
final ApiMessage keyMessage = apiMessageKeyFor(recordType);
readMessage(keyMessage, keyBuffer, recordType, "key");
if (valueBuffer == null) {
return new CoordinatorRecord(new ApiMessageAndVersion(keyMessage, recordType), null);
}
final ApiMessage valueMessage = apiMessageValueFor(recordType);
final short valueVersion = readVersion(valueBuffer, "value");
readMessage(valueMessage, valueBuffer, valueVersion, "value");
return new CoordinatorRecord(
new ApiMessageAndVersion(keyMessage, recordType),
new ApiMessageAndVersion(valueMessage, valueVersion)
);
}
private short readVersion(ByteBuffer buffer, String name) throws RuntimeException {
try {
return buffer.getShort();
} catch (BufferUnderflowException ex) {
throw new RuntimeException(String.format("Could not read version from %s's buffer.", name));
}
}
private void readMessage(ApiMessage message, ByteBuffer buffer, short version, String name) throws RuntimeException {
try {
message.read(new ByteBufferAccessor(buffer), version);
} catch (RuntimeException ex) {
throw new RuntimeException(String.format("Could not read record with version %d from %s's buffer due to: %s.",
version, name, ex.getMessage()), ex);
}
}
private ApiMessage apiMessageKeyFor(short recordType) {
switch (recordType) {
protected ApiMessage apiMessageKeyFor(short recordVersion) {
switch (recordVersion) {
case 0:
case 1:
return new OffsetCommitKey();
@@ -170,12 +86,13 @@ public class GroupCoordinatorRecordSerde implements Serializer<CoordinatorRecord
case 15:
return new ShareGroupStatePartitionMetadataKey();
default:
throw new CoordinatorLoader.UnknownRecordTypeException(recordType);
throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion);
}
}
private ApiMessage apiMessageValueFor(short recordType) {
switch (recordType) {
@Override
protected ApiMessage apiMessageValueFor(short recordVersion) {
switch (recordVersion) {
case 0:
case 1:
return new OffsetCommitValue();
@@ -208,7 +125,7 @@ public class GroupCoordinatorRecordSerde implements Serializer<CoordinatorRecord
case 15:
return new ShareGroupStatePartitionMetadataValue();
default:
throw new CoordinatorLoader.UnknownRecordTypeException(recordType);
throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion);
}
}
}

View File

@@ -56,18 +56,18 @@ public class PersisterStateBatch {
public static PersisterStateBatch from(ReadShareGroupStateResponseData.StateBatch batch) {
return new PersisterStateBatch(
batch.firstOffset(),
batch.lastOffset(),
batch.deliveryState(),
batch.deliveryCount());
batch.firstOffset(),
batch.lastOffset(),
batch.deliveryState(),
batch.deliveryCount());
}
public static PersisterStateBatch from(WriteShareGroupStateRequestData.StateBatch batch) {
return new PersisterStateBatch(
batch.firstOffset(),
batch.lastOffset(),
batch.deliveryState(),
batch.deliveryCount());
batch.firstOffset(),
batch.lastOffset(),
batch.deliveryState(),
batch.deliveryCount());
}
@Override
@@ -75,7 +75,10 @@
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PersisterStateBatch that = (PersisterStateBatch) o;
return firstOffset == that.firstOffset && lastOffset == that.lastOffset && deliveryState == that.deliveryState && deliveryCount == that.deliveryCount;
return firstOffset == that.firstOffset &&
lastOffset == that.lastOffset &&
deliveryState == that.deliveryState &&
deliveryCount == that.deliveryCount;
}
@Override
@@ -86,10 +89,10 @@
@Override
public String toString() {
return "PersisterStateBatch(" +
"firstOffset=" + firstOffset + "," +
"lastOffset=" + lastOffset + "," +
"deliveryState=" + deliveryState + "," +
"deliveryCount=" + deliveryCount +
")";
"firstOffset=" + firstOffset + "," +
"lastOffset=" + lastOffset + "," +
"deliveryState=" + deliveryState + "," +
"deliveryCount=" + deliveryCount +
")";
}
}

View File

@@ -1,41 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.group.share;
import org.apache.kafka.common.Uuid;
import java.util.Objects;
public class ShareGroupHelper {
/**
* Calculates the coordinator key for finding a share coordinator.
*
* @param groupId Group ID
* @param topicId Topic ID
* @param partition Partition index
*
* @return The coordinator key
*/
public static String coordinatorKey(String groupId, Uuid topicId, int partition) {
Objects.requireNonNull(groupId);
Objects.requireNonNull(topicId);
return String.format("%s:%s:%d", groupId, topicId, partition);
}
}

View File

@@ -0,0 +1,89 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.server.group.share;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.Uuid;
import java.util.Objects;
/**
* Common immutable share partition key class. This class is
* placed in server-common so that it can be freely used across
* various modules.
*/
public class SharePartitionKey {
private final String groupId;
private final Uuid topicId;
private final int partition;
private SharePartitionKey(String groupId, Uuid topicId, int partition) {
this.groupId = groupId;
this.topicId = topicId;
this.partition = partition;
}
public String groupId() {
return groupId;
}
public Uuid topicId() {
return topicId;
}
public int partition() {
return partition;
}
public static SharePartitionKey getInstance(String groupId, TopicIdPartition topicIdPartition) {
return getInstance(groupId, topicIdPartition.topicId(), topicIdPartition.partition());
}
public static SharePartitionKey getInstance(String groupId, Uuid topicId, int partition) {
return new SharePartitionKey(groupId, topicId, partition);
}
public String asCoordinatorKey() {
return asCoordinatorKey(groupId, topicId, partition);
}
public static String asCoordinatorKey(String groupId, Uuid topicId, int partition) {
return String.format("%s:%s:%d", groupId, topicId, partition);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof SharePartitionKey)) return false;
SharePartitionKey that = (SharePartitionKey) o;
return partition == that.partition && Objects.equals(groupId, that.groupId) && Objects.equals(topicId, that.topicId);
}
@Override
public int hashCode() {
return Objects.hash(groupId, topicId, partition);
}
@Override
public String toString() {
return "SharePartitionKey{" +
"groupId=" + groupId +
",topicId=" + topicId +
",partition=" + partition +
"}";
}
}
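A short usage sketch (not part of this diff): SharePartitionKey subsumes the coordinatorKey helper from the deleted ShareGroupHelper class above, producing the same "groupId:topicId:partition" string.

import org.apache.kafka.common.Uuid;
import org.apache.kafka.server.group.share.SharePartitionKey;

public class SharePartitionKeySketch {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();
        SharePartitionKey key = SharePartitionKey.getInstance("my-group", topicId, 3);
        // Prints "my-group:<topicId>:3" -- the key used to locate the share coordinator.
        System.out.println(key.asCoordinatorKey());
    }
}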

View File

@@ -0,0 +1,90 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.message.WriteShareGroupStateRequestData;
import org.apache.kafka.server.group.share.PersisterStateBatch;
import java.util.Objects;
/**
* This is a helper class which overrides the equals and hashCode
* methods to consider only the first and last offset fields of the
* state batch. This is useful when combining batches.
*/
public class PersisterOffsetsStateBatch {
private final PersisterStateBatch delegate;
public PersisterOffsetsStateBatch(
long firstOffset,
long lastOffset,
byte deliveryState,
short deliveryCount
) {
delegate = new PersisterStateBatch(firstOffset, lastOffset, deliveryState, deliveryCount);
}
public long firstOffset() {
return delegate.firstOffset();
}
public long lastOffset() {
return delegate.lastOffset();
}
public byte deliveryState() {
return delegate.deliveryState();
}
public short deliveryCount() {
return delegate.deliveryCount();
}
public static PersisterOffsetsStateBatch from(WriteShareGroupStateRequestData.StateBatch batch) {
return new PersisterOffsetsStateBatch(
batch.firstOffset(),
batch.lastOffset(),
batch.deliveryState(),
batch.deliveryCount()
);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof PersisterOffsetsStateBatch)) {
return false;
}
PersisterOffsetsStateBatch that = (PersisterOffsetsStateBatch) o;
return this.firstOffset() == that.firstOffset() && this.lastOffset() == that.lastOffset();
}
@Override
public int hashCode() {
return Objects.hash(firstOffset(), lastOffset());
}
@Override
public String toString() {
return "PersisterOffsetsStateBatch(" +
"firstOffset=" + firstOffset() + "," +
"lastOffset=" + lastOffset() + "," +
"deliveryState=" + deliveryState() + "," +
"deliveryCount=" + deliveryCount() +
")";
}
}
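A minimal sketch (not part of this diff) of why the offset-only equals/hashCode matters when combining batches:

import org.apache.kafka.coordinator.share.PersisterOffsetsStateBatch;

import java.util.LinkedHashSet;
import java.util.Set;

public class OffsetEqualitySketch {
    public static void main(String[] args) {
        // Same offset range, different delivery metadata.
        PersisterOffsetsStateBatch a = new PersisterOffsetsStateBatch(0L, 9L, (byte) 0, (short) 1);
        PersisterOffsetsStateBatch b = new PersisterOffsetsStateBatch(0L, 9L, (byte) 2, (short) 3);
        System.out.println(a.equals(b)); // true: only firstOffset and lastOffset are compared
        Set<PersisterOffsetsStateBatch> combined = new LinkedHashSet<>();
        combined.add(a);
        combined.add(b); // dropped as a duplicate of the same offset range
        System.out.println(combined.size()); // 1
    }
}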

View File

@@ -0,0 +1,94 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.message.ReadShareGroupStateRequestData;
import org.apache.kafka.common.message.ReadShareGroupStateResponseData;
import org.apache.kafka.common.message.WriteShareGroupStateRequestData;
import org.apache.kafka.common.message.WriteShareGroupStateResponseData;
import org.apache.kafka.common.requests.RequestContext;
import java.util.OptionalInt;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.IntSupplier;
public interface ShareCoordinator {
short SHARE_SNAPSHOT_RECORD_KEY_VERSION = 0;
short SHARE_SNAPSHOT_RECORD_VALUE_VERSION = 0;
short SHARE_UPDATE_RECORD_KEY_VERSION = 1;
short SHARE_UPDATE_RECORD_VALUE_VERSION = 1;
/**
* Return the partition index for the given key.
*
* @param key - groupId:topicId:partitionId.
* @return The partition index.
*/
int partitionFor(String key);
/**
* Return the configuration properties of the share-group state topic.
*
* @return Properties of the share-group state topic.
*/
Properties shareGroupStateTopicConfigs();
/**
* Start the share coordinator.
*
* @param shareGroupTopicPartitionCount - supplier returning the number of partitions of the __share_group_state topic
*/
void startup(IntSupplier shareGroupTopicPartitionCount);
/**
* Stop the share coordinator.
*/
void shutdown();
/**
* Handle the write share group state call.
* @param context - the incoming write request context
* @param request - the RPC request object
* @return A completable future containing the write RPC response data.
*/
CompletableFuture<WriteShareGroupStateResponseData> writeState(RequestContext context, WriteShareGroupStateRequestData request);
/**
* Handle the read share group state call.
* @param context - the incoming read request context
* @param request - the RPC request object
* @return A completable future containing the read RPC response data.
*/
CompletableFuture<ReadShareGroupStateResponseData> readState(RequestContext context, ReadShareGroupStateRequestData request);
/**
* Called when a new coordinator is elected.
* @param partitionIndex - The partition index (internal topic)
* @param partitionLeaderEpoch - Leader epoch of the partition (internal topic)
*/
void onElection(int partitionIndex, int partitionLeaderEpoch);
/**
* Called when the coordinator resigns.
* @param partitionIndex - The partition index (internal topic)
* @param partitionLeaderEpoch - Leader epoch of the partition (internal topic). An empty optional means the partition was deleted.
*/
void onResignation(int partitionIndex, OptionalInt partitionLeaderEpoch);
}
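This PR leaves partitionFor unimplemented; the concrete service arrives later in the series. As a hypothetical sketch, the mapping is assumed to follow the same hashing scheme the group coordinator uses for its internal topic:

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.server.group.share.SharePartitionKey;

public class PartitionForSketch {
    // Hypothetical: hash the coordinator key onto the __share_group_state partitions.
    static int partitionFor(String key, int numPartitions) {
        return Utils.abs(key.hashCode()) % numPartitions;
    }

    public static void main(String[] args) {
        String key = SharePartitionKey.asCoordinatorKey("my-group", Uuid.randomUuid(), 0);
        System.out.println(partitionFor(key, 50)); // some partition in [0, 50)
    }
}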

View File

@@ -0,0 +1,74 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue;
import org.apache.kafka.coordinator.share.generated.ShareUpdateKey;
import org.apache.kafka.coordinator.share.generated.ShareUpdateValue;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import java.util.stream.Collectors;
public class ShareCoordinatorRecordHelpers {
public static CoordinatorRecord newShareSnapshotRecord(String groupId, Uuid topicId, int partitionId, ShareGroupOffset offsetData) {
return new CoordinatorRecord(
new ApiMessageAndVersion(new ShareSnapshotKey()
.setGroupId(groupId)
.setTopicId(topicId)
.setPartition(partitionId),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION),
new ApiMessageAndVersion(new ShareSnapshotValue()
.setSnapshotEpoch(offsetData.snapshotEpoch())
.setStateEpoch(offsetData.stateEpoch())
.setLeaderEpoch(offsetData.leaderEpoch())
.setStartOffset(offsetData.startOffset())
.setStateBatches(offsetData.stateBatches().stream()
.map(batch -> new ShareSnapshotValue.StateBatch()
.setFirstOffset(batch.firstOffset())
.setLastOffset(batch.lastOffset())
.setDeliveryCount(batch.deliveryCount())
.setDeliveryState(batch.deliveryState()))
.collect(Collectors.toList())),
(short) 0)
);
}
public static CoordinatorRecord newShareSnapshotUpdateRecord(String groupId, Uuid topicId, int partitionId, ShareGroupOffset offsetData) {
return new CoordinatorRecord(
new ApiMessageAndVersion(new ShareUpdateKey()
.setGroupId(groupId)
.setTopicId(topicId)
.setPartition(partitionId),
ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION),
new ApiMessageAndVersion(new ShareUpdateValue()
.setSnapshotEpoch(offsetData.snapshotEpoch())
.setLeaderEpoch(offsetData.leaderEpoch())
.setStartOffset(offsetData.startOffset())
.setStateBatches(offsetData.stateBatches().stream()
.map(batch -> new ShareUpdateValue.StateBatch()
.setFirstOffset(batch.firstOffset())
.setLastOffset(batch.lastOffset())
.setDeliveryCount(batch.deliveryCount())
.setDeliveryState(batch.deliveryState()))
.collect(Collectors.toList())),
(short) 0)
);
}
}

View File

@@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRecordSerde;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue;
import org.apache.kafka.coordinator.share.generated.ShareUpdateKey;
import org.apache.kafka.coordinator.share.generated.ShareUpdateValue;
public class ShareCoordinatorRecordSerde extends CoordinatorRecordSerde {
@Override
protected ApiMessage apiMessageKeyFor(short recordVersion) {
switch (recordVersion) {
case ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION:
return new ShareSnapshotKey();
case ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION:
return new ShareUpdateKey();
default:
throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion);
}
}
@Override
protected ApiMessage apiMessageValueFor(short recordVersion) {
switch (recordVersion) {
case ShareCoordinator.SHARE_SNAPSHOT_RECORD_VALUE_VERSION:
return new ShareSnapshotValue();
case ShareCoordinator.SHARE_UPDATE_RECORD_VALUE_VERSION:
return new ShareUpdateValue();
default:
throw new CoordinatorLoader.UnknownRecordTypeException(recordVersion);
}
}
}

View File

@@ -0,0 +1,174 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.message.WriteShareGroupStateRequestData;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue;
import org.apache.kafka.coordinator.share.generated.ShareUpdateValue;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Container class to represent the data encapsulated in {@link ShareSnapshotValue} and {@link ShareUpdateValue}.
* This class is effectively immutable (the state batch list is not modified outside this class).
*/
public class ShareGroupOffset {
private final int snapshotEpoch;
private final int stateEpoch;
private final int leaderEpoch;
private final long startOffset;
private final List<PersisterOffsetsStateBatch> stateBatches;
private ShareGroupOffset(int snapshotEpoch,
int stateEpoch,
int leaderEpoch,
long startOffset,
List<PersisterOffsetsStateBatch> stateBatches) {
this.snapshotEpoch = snapshotEpoch;
this.stateEpoch = stateEpoch;
this.leaderEpoch = leaderEpoch;
this.startOffset = startOffset;
this.stateBatches = stateBatches;
}
public int snapshotEpoch() {
return snapshotEpoch;
}
public int stateEpoch() {
return stateEpoch;
}
public int leaderEpoch() {
return leaderEpoch;
}
public long startOffset() {
return startOffset;
}
public List<PersisterOffsetsStateBatch> stateBatches() {
return Collections.unmodifiableList(stateBatches);
}
private static PersisterOffsetsStateBatch toPersisterOffsetsStateBatch(ShareSnapshotValue.StateBatch stateBatch) {
return new PersisterOffsetsStateBatch(stateBatch.firstOffset(), stateBatch.lastOffset(), stateBatch.deliveryState(), stateBatch.deliveryCount());
}
private static PersisterOffsetsStateBatch toPersisterOffsetsStateBatch(ShareUpdateValue.StateBatch stateBatch) {
return new PersisterOffsetsStateBatch(stateBatch.firstOffset(), stateBatch.lastOffset(), stateBatch.deliveryState(), stateBatch.deliveryCount());
}
public static ShareGroupOffset fromRecord(ShareSnapshotValue record) {
return new ShareGroupOffset(record.snapshotEpoch(), record.stateEpoch(), record.leaderEpoch(), record.startOffset(), record.stateBatches().stream()
.map(ShareGroupOffset::toPersisterOffsetsStateBatch).collect(Collectors.toList()));
}
public static ShareGroupOffset fromRecord(ShareUpdateValue record) {
// ShareUpdateValue does not carry a state epoch, so -1 is used as a placeholder here.
return new ShareGroupOffset(record.snapshotEpoch(), -1, record.leaderEpoch(), record.startOffset(), record.stateBatches().stream()
.map(ShareGroupOffset::toPersisterOffsetsStateBatch).collect(Collectors.toList()));
}
public static ShareGroupOffset fromRequest(WriteShareGroupStateRequestData.PartitionData data) {
return fromRequest(data, 0);
}
public static ShareGroupOffset fromRequest(WriteShareGroupStateRequestData.PartitionData data, int snapshotEpoch) {
return new ShareGroupOffset(snapshotEpoch,
data.stateEpoch(),
data.leaderEpoch(),
data.startOffset(),
data.stateBatches().stream()
.map(PersisterOffsetsStateBatch::from)
.collect(Collectors.toList()));
}
public Set<PersisterOffsetsStateBatch> stateBatchAsSet() {
return new LinkedHashSet<>(stateBatches);
}
public static class Builder {
private int snapshotEpoch;
private int stateEpoch;
private int leaderEpoch;
private long startOffset;
private List<PersisterOffsetsStateBatch> stateBatches;
public Builder setSnapshotEpoch(int snapshotEpoch) {
this.snapshotEpoch = snapshotEpoch;
return this;
}
public Builder setStateEpoch(int stateEpoch) {
this.stateEpoch = stateEpoch;
return this;
}
public Builder setLeaderEpoch(int leaderEpoch) {
this.leaderEpoch = leaderEpoch;
return this;
}
public Builder setStartOffset(long startOffset) {
this.startOffset = startOffset;
return this;
}
public Builder setStateBatches(List<PersisterOffsetsStateBatch> stateBatches) {
this.stateBatches = stateBatches;
return this;
}
public ShareGroupOffset build() {
return new ShareGroupOffset(snapshotEpoch, stateEpoch, leaderEpoch, startOffset, stateBatches);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ShareGroupOffset that = (ShareGroupOffset) o;
return snapshotEpoch == that.snapshotEpoch &&
stateEpoch == that.stateEpoch &&
leaderEpoch == that.leaderEpoch &&
startOffset == that.startOffset &&
Objects.equals(stateBatches, that.stateBatches);
}
@Override
public int hashCode() {
return Objects.hash(snapshotEpoch, stateEpoch, leaderEpoch, startOffset, stateBatches);
}
@Override
public String toString() {
return "ShareGroupOffset{" +
"snapshotEpoch=" + snapshotEpoch +
", stateEpoch=" + stateEpoch +
", leaderEpoch=" + leaderEpoch +
", startOffset=" + startOffset +
", stateBatches=" + stateBatches +
'}';
}
}
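A small sketch (not part of this diff) of the ShareUpdateValue conversion; since a share update record carries no state epoch, fromRecord substitutes -1:

import org.apache.kafka.coordinator.share.ShareGroupOffset;
import org.apache.kafka.coordinator.share.generated.ShareUpdateValue;

import java.util.Collections;

public class FromUpdateRecordSketch {
    public static void main(String[] args) {
        ShareUpdateValue update = new ShareUpdateValue()
            .setSnapshotEpoch(2)
            .setLeaderEpoch(5)
            .setStartOffset(100L)
            .setStateBatches(Collections.singletonList(
                new ShareUpdateValue.StateBatch()
                    .setFirstOffset(100L)
                    .setLastOffset(109L)
                    .setDeliveryState((byte) 0)
                    .setDeliveryCount((short) 1)));
        ShareGroupOffset offset = ShareGroupOffset.fromRecord(update);
        System.out.println(offset.stateEpoch()); // -1: ShareUpdateValue has no state epoch field
    }
}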

View File

@@ -0,0 +1,118 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue;
import org.apache.kafka.coordinator.share.generated.ShareUpdateKey;
import org.apache.kafka.coordinator.share.generated.ShareUpdateValue;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.junit.jupiter.api.Test;
import java.util.Collections;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class ShareCoordinatorRecordHelpersTest {
@Test
public void testNewShareSnapshotRecord() {
String groupId = "test-group";
Uuid topicId = Uuid.randomUuid();
int partitionId = 1;
PersisterOffsetsStateBatch batch = new PersisterOffsetsStateBatch(1L, 10L, (byte) 0, (short) 1);
CoordinatorRecord record = ShareCoordinatorRecordHelpers.newShareSnapshotRecord(
groupId,
topicId,
partitionId,
new ShareGroupOffset.Builder()
.setSnapshotEpoch(0)
.setStateEpoch(1)
.setLeaderEpoch(5)
.setStartOffset(0)
.setStateBatches(Collections.singletonList(batch))
.build()
);
CoordinatorRecord expectedRecord = new CoordinatorRecord(
new ApiMessageAndVersion(
new ShareSnapshotKey()
.setGroupId(groupId)
.setTopicId(topicId)
.setPartition(partitionId),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION),
new ApiMessageAndVersion(
new ShareSnapshotValue()
.setSnapshotEpoch(0)
.setStateEpoch(1)
.setLeaderEpoch(5)
.setStartOffset(0)
.setStateBatches(Collections.singletonList(
new ShareSnapshotValue.StateBatch()
.setFirstOffset(1L)
.setLastOffset(10L)
.setDeliveryState((byte) 0)
.setDeliveryCount((short) 1))),
(short) 0));
assertEquals(expectedRecord, record);
}
@Test
public void testNewShareUpdateRecord() {
String groupId = "test-group";
Uuid topicId = Uuid.randomUuid();
int partitionId = 1;
PersisterOffsetsStateBatch batch = new PersisterOffsetsStateBatch(1L, 10L, (byte) 0, (short) 1);
CoordinatorRecord record = ShareCoordinatorRecordHelpers.newShareSnapshotUpdateRecord(
groupId,
topicId,
partitionId,
new ShareGroupOffset.Builder()
.setSnapshotEpoch(0)
.setStateEpoch(-1) // ignored for share update
.setLeaderEpoch(5)
.setStartOffset(0)
.setStateBatches(Collections.singletonList(batch))
.build()
);
CoordinatorRecord expectedRecord = new CoordinatorRecord(
new ApiMessageAndVersion(
new ShareUpdateKey()
.setGroupId(groupId)
.setTopicId(topicId)
.setPartition(partitionId),
ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION),
new ApiMessageAndVersion(
new ShareUpdateValue()
.setSnapshotEpoch(0)
.setLeaderEpoch(5)
.setStartOffset(0)
.setStateBatches(Collections.singletonList(
new ShareUpdateValue.StateBatch()
.setFirstOffset(1L)
.setLastOffset(10L)
.setDeliveryState((byte) 0)
.setDeliveryCount((short) 1))),
(short) 0));
assertEquals(expectedRecord, record);
}
}

View File

@@ -0,0 +1,248 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.coordinator.share;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader;
import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotKey;
import org.apache.kafka.coordinator.share.generated.ShareSnapshotValue;
import org.apache.kafka.coordinator.share.generated.ShareUpdateKey;
import org.apache.kafka.coordinator.share.generated.ShareUpdateValue;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.util.Collections;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ShareCoordinatorRecordSerdeTest {
private ShareCoordinatorRecordSerde serde;
@BeforeEach
public void setup() {
serde = new ShareCoordinatorRecordSerde();
}
@Test
public void testSerializeKey() {
CoordinatorRecord record = getShareSnapshotRecord("groupId", Uuid.randomUuid(), 1);
assertArrayEquals(
MessageUtil.toVersionPrefixedBytes(record.key().version(), record.key().message()),
serde.serializeKey(record)
);
}
@Test
public void testSerializeValue() {
CoordinatorRecord record = getShareSnapshotRecord("groupId", Uuid.randomUuid(), 1);
assertArrayEquals(
MessageUtil.toVersionPrefixedBytes(record.value().version(), record.value().message()),
serde.serializeValue(record)
);
}
@Test
public void testSerializeNullValue() {
CoordinatorRecord record = new CoordinatorRecord(
new ApiMessageAndVersion(
new ShareSnapshotKey()
.setGroupId("group")
.setTopicId(Uuid.randomUuid())
.setPartition(1),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION
),
null
);
assertNull(serde.serializeValue(record));
}
@Test
public void testDeserialize() {
CoordinatorRecord record = getShareSnapshotRecord("groupId", Uuid.randomUuid(), 1);
ApiMessageAndVersion key = record.key();
ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message());
ApiMessageAndVersion value = record.value();
ByteBuffer valueBuffer = MessageUtil.toVersionPrefixedByteBuffer(value.version(), value.message());
CoordinatorRecord desRecord = serde.deserialize(keyBuffer, valueBuffer);
assertEquals(key, desRecord.key());
assertEquals(value, desRecord.value());
}
@Test
public void testDeserializeWithTombstoneForValue() {
ApiMessageAndVersion key = new ApiMessageAndVersion(
new ShareSnapshotKey()
.setGroupId("groupId")
.setTopicId(Uuid.randomUuid())
.setPartition(1),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION
);
ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message());
CoordinatorRecord record = serde.deserialize(keyBuffer, null);
assertEquals(key, record.key());
assertNull(record.value());
}
@Test
public void testDeserializeWithInvalidRecordType() {
ByteBuffer keyBuffer = ByteBuffer.allocate(64);
keyBuffer.putShort((short) 255);
keyBuffer.rewind();
ByteBuffer valueBuffer = ByteBuffer.allocate(64);
CoordinatorLoader.UnknownRecordTypeException ex =
assertThrows(CoordinatorLoader.UnknownRecordTypeException.class,
() -> serde.deserialize(keyBuffer, valueBuffer));
assertEquals((short) 255, ex.unknownType());
}
@Test
public void testDeserializeWithKeyEmptyBuffer() {
ByteBuffer keyBuffer = ByteBuffer.allocate(0);
ByteBuffer valueBuffer = ByteBuffer.allocate(64);
RuntimeException ex =
assertThrows(RuntimeException.class,
() -> serde.deserialize(keyBuffer, valueBuffer));
assertEquals("Could not read version from key's buffer.", ex.getMessage());
}
@Test
public void testDeserializeWithValueEmptyBuffer() {
ApiMessageAndVersion key = new ApiMessageAndVersion(
new ShareSnapshotKey()
.setGroupId("foo")
.setTopicId(Uuid.randomUuid())
.setPartition(1),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION
);
ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message());
ByteBuffer valueBuffer = ByteBuffer.allocate(0);
RuntimeException ex =
assertThrows(RuntimeException.class,
() -> serde.deserialize(keyBuffer, valueBuffer));
assertEquals("Could not read version from value's buffer.", ex.getMessage());
}
@Test
public void testDeserializeWithInvalidKeyBytes() {
ByteBuffer keyBuffer = ByteBuffer.allocate(2);
keyBuffer.putShort((short) 0);
keyBuffer.rewind();
ByteBuffer valueBuffer = ByteBuffer.allocate(2);
valueBuffer.putShort((short) 0);
valueBuffer.rewind();
RuntimeException ex =
assertThrows(RuntimeException.class,
() -> serde.deserialize(keyBuffer, valueBuffer));
assertTrue(ex.getMessage().startsWith("Could not read record with version 0 from key's buffer due to"),
ex.getMessage());
}
@Test
public void testDeserializeWithInvalidValueBytes() {
ApiMessageAndVersion key = new ApiMessageAndVersion(
new ShareSnapshotKey()
.setGroupId("foo")
.setTopicId(Uuid.randomUuid())
.setPartition(1),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION
);
ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message());
ByteBuffer valueBuffer = ByteBuffer.allocate(2);
valueBuffer.putShort((short) 0);
valueBuffer.rewind();
RuntimeException ex =
assertThrows(RuntimeException.class,
() -> serde.deserialize(keyBuffer, valueBuffer));
assertTrue(ex.getMessage().startsWith("Could not read record with version 0 from value's buffer due to"),
ex.getMessage());
}
@Test
public void testDeserializeAllRecordTypes() {
roundTrip((short) 0, new ShareSnapshotKey(), new ShareSnapshotValue());
roundTrip((short) 1, new ShareUpdateKey(), new ShareUpdateValue());
}
private void roundTrip(
short recordType,
ApiMessage key,
ApiMessage val
) {
// Use <= so that messages supporting a single version (lowest == highest) are still exercised.
for (short version = val.lowestSupportedVersion(); version <= val.highestSupportedVersion(); version++) {
ApiMessageAndVersion keyMessageAndVersion = new ApiMessageAndVersion(key, recordType);
ApiMessageAndVersion valMessageAndVersion = new ApiMessageAndVersion(val, version);
CoordinatorRecord record = serde.deserialize(
MessageUtil.toVersionPrefixedByteBuffer(recordType, key),
MessageUtil.toVersionPrefixedByteBuffer(version, val)
);
assertEquals(keyMessageAndVersion, record.key());
assertEquals(valMessageAndVersion, record.value());
}
}
private static CoordinatorRecord getShareSnapshotRecord(String groupId, Uuid topicId, int partitionId) {
return new CoordinatorRecord(
new ApiMessageAndVersion(
new ShareSnapshotKey()
.setGroupId(groupId)
.setTopicId(topicId)
.setPartition(partitionId),
ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION
),
new ApiMessageAndVersion(
new ShareSnapshotValue()
.setStartOffset(1L)
.setLeaderEpoch(2)
.setStateEpoch(1)
.setSnapshotEpoch(1)
.setStateBatches(Collections.singletonList(new ShareSnapshotValue.StateBatch()
.setFirstOffset(1)
.setLastOffset(10)
.setDeliveryState((byte) 0)
.setDeliveryCount((short) 1))),
(short) 0
)
);
}
}