MINOR: remove FetchResponse.AbortedTransaction and redundant construc… (#9758)

1. rename INVALID_HIGHWATERMARK to INVALID_HIGH_WATERMARK
2. replace FetchResponse.AbortedTransaction with FetchResponseData.AbortedTransaction
3. remove redundant constructors from FetchResponse.PartitionData
4. rename recordSet to records
5. add helpers "recordsOrFail" and "recordsSize" to FetchResponse to handle record casting and size computation

Reviewers: Ismael Juma <ismael@juma.me.uk>
Chia-Ping Tsai 2021-03-04 18:06:50 +08:00 committed by GitHub
parent 3ef39e1365
commit 8205051e90
45 changed files with 1102 additions and 1047 deletions
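
To make the API change concrete, here is a minimal, hedged sketch of the post-commit usage implied by the diff below: building per-partition data directly on the generated FetchResponseData.PartitionData, creating the response through the new FetchResponse.of factory, and reading records through the new helpers. The class name, topic, and session id are illustrative, not part of the commit.

import java.util.LinkedHashMap;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Records;
import org.apache.kafka.common.requests.FetchResponse;

public class FetchResponseUsageSketch {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("foo", 0); // illustrative topic-partition

        // The removed FetchResponse.PartitionData constructors are replaced by the generated
        // builder-style setters; the record field is now called "records" (was "recordSet").
        LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
        responseData.put(tp, new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.partition())
                .setHighWatermark(100L)
                .setLogStartOffset(0L)
                .setRecords(MemoryRecords.EMPTY));

        // The constructor taking a partition map is replaced by the static factory FetchResponse.of.
        FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123, responseData);

        // The new helpers centralize the cast from BaseRecords and the null handling.
        FetchResponseData.PartitionData partition = response.responseData().get(tp);
        Records records = FetchResponse.recordsOrFail(partition);
        System.out.println(FetchResponse.recordsSize(partition) + " bytes, " + records.sizeInBytes());
    }
}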


@ -318,7 +318,7 @@ public class FetchSessionHandler {
* @param response The response.
* @return True if the full fetch response partitions are valid.
*/
String verifyFullFetchResponsePartitions(FetchResponse<?> response) {
String verifyFullFetchResponsePartitions(FetchResponse response) {
StringBuilder bld = new StringBuilder();
Set<TopicPartition> extra =
findMissing(response.responseData().keySet(), sessionPartitions.keySet());
@ -343,7 +343,7 @@ public class FetchSessionHandler {
* @param response The response.
* @return True if the incremental fetch response partitions are valid.
*/
String verifyIncrementalFetchResponsePartitions(FetchResponse<?> response) {
String verifyIncrementalFetchResponsePartitions(FetchResponse response) {
Set<TopicPartition> extra =
findMissing(response.responseData().keySet(), sessionPartitions.keySet());
if (!extra.isEmpty()) {
@ -362,7 +362,7 @@ public class FetchSessionHandler {
* @param response The FetchResponse.
* @return The string to log.
*/
private String responseDataToLogString(FetchResponse<?> response) {
private String responseDataToLogString(FetchResponse response) {
if (!log.isTraceEnabled()) {
int implied = sessionPartitions.size() - response.responseData().size();
if (implied > 0) {
@ -398,7 +398,7 @@ public class FetchSessionHandler {
* @return True if the response is well-formed; false if it can't be processed
* because of missing or unexpected partitions.
*/
public boolean handleResponse(FetchResponse<?> response) {
public boolean handleResponse(FetchResponse response) {
if (response.error() != Errors.NONE) {
log.info("Node {} was unable to process the fetch request with {}: {}.",
node, nextMetadata, response.error());


@ -47,6 +47,7 @@ import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
@ -66,7 +67,6 @@ import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.Records;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
@ -277,7 +277,7 @@ public class Fetcher<K, V> implements Closeable {
synchronized (Fetcher.this) {
try {
@SuppressWarnings("unchecked")
FetchResponse<Records> response = (FetchResponse<Records>) resp.responseBody();
FetchResponse response = (FetchResponse) resp.responseBody();
FetchSessionHandler handler = sessionHandler(fetchTarget.id());
if (handler == null) {
log.error("Unable to find FetchSessionHandler for node {}. Ignoring fetch response.",
@ -291,7 +291,7 @@ public class Fetcher<K, V> implements Closeable {
Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet());
FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions);
for (Map.Entry<TopicPartition, FetchResponse.PartitionData<Records>> entry : response.responseData().entrySet()) {
for (Map.Entry<TopicPartition, FetchResponseData.PartitionData> entry : response.responseData().entrySet()) {
TopicPartition partition = entry.getKey();
FetchRequest.PartitionData requestData = data.sessionPartitions().get(partition);
if (requestData == null) {
@ -310,12 +310,12 @@ public class Fetcher<K, V> implements Closeable {
throw new IllegalStateException(message);
} else {
long fetchOffset = requestData.fetchOffset;
FetchResponse.PartitionData<Records> partitionData = entry.getValue();
FetchResponseData.PartitionData partitionData = entry.getValue();
log.debug("Fetch {} at offset {} for partition {} returned fetch data {}",
isolationLevel, fetchOffset, partition, partitionData);
Iterator<? extends RecordBatch> batches = partitionData.records().batches().iterator();
Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partitionData).batches().iterator();
short responseVersion = resp.requestHeader().apiVersion();
completedFetches.add(new CompletedFetch(partition, partitionData,
@ -618,8 +618,8 @@ public class Fetcher<K, V> implements Closeable {
// The first condition ensures that the completedFetches is not stuck with the same completedFetch
// in cases such as the TopicAuthorizationException, and the second condition ensures that no
// potential data loss due to an exception in a following record.
FetchResponse.PartitionData<Records> partition = records.partitionData;
if (fetched.isEmpty() && (partition.records() == null || partition.records().sizeInBytes() == 0)) {
FetchResponseData.PartitionData partition = records.partitionData;
if (fetched.isEmpty() && FetchResponse.recordsOrFail(partition).sizeInBytes() == 0) {
completedFetches.poll();
}
throw e;
@ -1229,10 +1229,10 @@ public class Fetcher<K, V> implements Closeable {
*/
private CompletedFetch initializeCompletedFetch(CompletedFetch nextCompletedFetch) {
TopicPartition tp = nextCompletedFetch.partition;
FetchResponse.PartitionData<Records> partition = nextCompletedFetch.partitionData;
FetchResponseData.PartitionData partition = nextCompletedFetch.partitionData;
long fetchOffset = nextCompletedFetch.nextFetchOffset;
CompletedFetch completedFetch = null;
Errors error = partition.error();
Errors error = Errors.forCode(partition.errorCode());
try {
if (!subscriptions.hasValidPosition(tp)) {
@ -1249,11 +1249,11 @@ public class Fetcher<K, V> implements Closeable {
}
log.trace("Preparing to read {} bytes of data for partition {} with offset {}",
partition.records().sizeInBytes(), tp, position);
Iterator<? extends RecordBatch> batches = partition.records().batches().iterator();
FetchResponse.recordsSize(partition), tp, position);
Iterator<? extends RecordBatch> batches = FetchResponse.recordsOrFail(partition).batches().iterator();
completedFetch = nextCompletedFetch;
if (!batches.hasNext() && partition.records().sizeInBytes() > 0) {
if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) {
if (completedFetch.responseVersion < 3) {
// Implement the pre KIP-74 behavior of throwing a RecordTooLargeException.
Map<TopicPartition, Long> recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset);
@ -1286,11 +1286,11 @@ public class Fetcher<K, V> implements Closeable {
subscriptions.updateLastStableOffset(tp, partition.lastStableOffset());
}
if (partition.preferredReadReplica().isPresent()) {
subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica().get(), () -> {
if (FetchResponse.isPreferredReplica(partition)) {
subscriptions.updatePreferredReadReplica(completedFetch.partition, partition.preferredReadReplica(), () -> {
long expireTimeMs = time.milliseconds() + metadata.metadataExpireMs();
log.debug("Updating preferred read replica for partition {} to {}, set to expire at {}",
tp, partition.preferredReadReplica().get(), expireTimeMs);
tp, partition.preferredReadReplica(), expireTimeMs);
return expireTimeMs;
});
}
@ -1455,8 +1455,8 @@ public class Fetcher<K, V> implements Closeable {
private final TopicPartition partition;
private final Iterator<? extends RecordBatch> batches;
private final Set<Long> abortedProducerIds;
private final PriorityQueue<FetchResponse.AbortedTransaction> abortedTransactions;
private final FetchResponse.PartitionData<Records> partitionData;
private final PriorityQueue<FetchResponseData.AbortedTransaction> abortedTransactions;
private final FetchResponseData.PartitionData partitionData;
private final FetchResponseMetricAggregator metricAggregator;
private final short responseVersion;
@ -1473,7 +1473,7 @@ public class Fetcher<K, V> implements Closeable {
private boolean initialized = false;
private CompletedFetch(TopicPartition partition,
FetchResponse.PartitionData<Records> partitionData,
FetchResponseData.PartitionData partitionData,
FetchResponseMetricAggregator metricAggregator,
Iterator<? extends RecordBatch> batches,
Long fetchOffset,
@ -1641,9 +1641,9 @@ public class Fetcher<K, V> implements Closeable {
if (abortedTransactions == null)
return;
while (!abortedTransactions.isEmpty() && abortedTransactions.peek().firstOffset <= offset) {
FetchResponse.AbortedTransaction abortedTransaction = abortedTransactions.poll();
abortedProducerIds.add(abortedTransaction.producerId);
while (!abortedTransactions.isEmpty() && abortedTransactions.peek().firstOffset() <= offset) {
FetchResponseData.AbortedTransaction abortedTransaction = abortedTransactions.poll();
abortedProducerIds.add(abortedTransaction.producerId());
}
}
@ -1651,12 +1651,12 @@ public class Fetcher<K, V> implements Closeable {
return batch.isTransactional() && abortedProducerIds.contains(batch.producerId());
}
private PriorityQueue<FetchResponse.AbortedTransaction> abortedTransactions(FetchResponse.PartitionData<?> partition) {
private PriorityQueue<FetchResponseData.AbortedTransaction> abortedTransactions(FetchResponseData.PartitionData partition) {
if (partition.abortedTransactions() == null || partition.abortedTransactions().isEmpty())
return null;
PriorityQueue<FetchResponse.AbortedTransaction> abortedTransactions = new PriorityQueue<>(
partition.abortedTransactions().size(), Comparator.comparingLong(o -> o.firstOffset)
PriorityQueue<FetchResponseData.AbortedTransaction> abortedTransactions = new PriorityQueue<>(
partition.abortedTransactions().size(), Comparator.comparingLong(FetchResponseData.AbortedTransaction::firstOffset)
);
abortedTransactions.addAll(partition.abortedTransactions());
return abortedTransactions;


@ -19,10 +19,10 @@ package org.apache.kafka.common.requests;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchRequestData;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.utils.Utils;
@ -296,14 +296,11 @@ public class FetchRequest extends AbstractRequest {
// may not be any partitions at all in the response. For this reason, the top-level error code
// is essential for them.
Errors error = Errors.forException(e);
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
for (Map.Entry<TopicPartition, PartitionData> entry : fetchData.entrySet()) {
FetchResponse.PartitionData<MemoryRecords> partitionResponse = new FetchResponse.PartitionData<>(error,
FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET,
FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), null, MemoryRecords.EMPTY);
responseData.put(entry.getKey(), partitionResponse);
responseData.put(entry.getKey(), FetchResponse.partitionResponse(entry.getKey().partition(), error));
}
return new FetchResponse<>(error, responseData, throttleTimeMs, data.sessionId());
return FetchResponse.of(error, throttleTimeMs, data.sessionId(), responseData);
}
public int replicaId() {


@ -22,8 +22,8 @@ import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Records;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@ -33,7 +33,6 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
@ -57,238 +56,43 @@ import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
* the fetch offset after the index lookup
* - {@link Errors#UNKNOWN_SERVER_ERROR} For any unexpected errors
*/
public class FetchResponse<T extends BaseRecords> extends AbstractResponse {
public static final long INVALID_HIGHWATERMARK = -1L;
public class FetchResponse extends AbstractResponse {
public static final long INVALID_HIGH_WATERMARK = -1L;
public static final long INVALID_LAST_STABLE_OFFSET = -1L;
public static final long INVALID_LOG_START_OFFSET = -1L;
public static final int INVALID_PREFERRED_REPLICA_ID = -1;
private final FetchResponseData data;
private final LinkedHashMap<TopicPartition, PartitionData<T>> responseDataMap;
// we build responseData when needed.
private volatile LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = null;
@Override
public FetchResponseData data() {
return data;
}
public static final class AbortedTransaction {
public final long producerId;
public final long firstOffset;
public AbortedTransaction(long producerId, long firstOffset) {
this.producerId = producerId;
this.firstOffset = firstOffset;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
AbortedTransaction that = (AbortedTransaction) o;
return producerId == that.producerId && firstOffset == that.firstOffset;
}
@Override
public int hashCode() {
int result = Long.hashCode(producerId);
result = 31 * result + Long.hashCode(firstOffset);
return result;
}
@Override
public String toString() {
return "(producerId=" + producerId + ", firstOffset=" + firstOffset + ")";
}
static AbortedTransaction fromMessage(FetchResponseData.AbortedTransaction abortedTransaction) {
return new AbortedTransaction(abortedTransaction.producerId(), abortedTransaction.firstOffset());
}
}
public static final class PartitionData<T extends BaseRecords> {
private final FetchResponseData.FetchablePartitionResponse partitionResponse;
// Derived fields
private final Optional<Integer> preferredReplica;
private final List<AbortedTransaction> abortedTransactions;
private final Errors error;
private PartitionData(FetchResponseData.FetchablePartitionResponse partitionResponse) {
// We partially construct FetchablePartitionResponse since we don't know the partition ID at this point
// When we convert the PartitionData (and other fields) into FetchResponseData down in toMessage, we
// set the partition IDs.
this.partitionResponse = partitionResponse;
this.preferredReplica = Optional.of(partitionResponse.preferredReadReplica())
.filter(replicaId -> replicaId != INVALID_PREFERRED_REPLICA_ID);
if (partitionResponse.abortedTransactions() == null) {
this.abortedTransactions = null;
} else {
this.abortedTransactions = partitionResponse.abortedTransactions().stream()
.map(AbortedTransaction::fromMessage)
.collect(Collectors.toList());
}
this.error = Errors.forCode(partitionResponse.errorCode());
}
public PartitionData(Errors error,
long highWatermark,
long lastStableOffset,
long logStartOffset,
Optional<Integer> preferredReadReplica,
List<AbortedTransaction> abortedTransactions,
Optional<FetchResponseData.EpochEndOffset> divergingEpoch,
T records) {
this.preferredReplica = preferredReadReplica;
this.abortedTransactions = abortedTransactions;
this.error = error;
FetchResponseData.FetchablePartitionResponse partitionResponse =
new FetchResponseData.FetchablePartitionResponse();
partitionResponse.setErrorCode(error.code())
.setHighWatermark(highWatermark)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(logStartOffset);
if (abortedTransactions != null) {
partitionResponse.setAbortedTransactions(abortedTransactions.stream().map(
aborted -> new FetchResponseData.AbortedTransaction()
.setProducerId(aborted.producerId)
.setFirstOffset(aborted.firstOffset))
.collect(Collectors.toList()));
} else {
partitionResponse.setAbortedTransactions(null);
}
partitionResponse.setPreferredReadReplica(preferredReadReplica.orElse(INVALID_PREFERRED_REPLICA_ID));
partitionResponse.setRecordSet(records);
divergingEpoch.ifPresent(partitionResponse::setDivergingEpoch);
this.partitionResponse = partitionResponse;
}
public PartitionData(Errors error,
long highWatermark,
long lastStableOffset,
long logStartOffset,
Optional<Integer> preferredReadReplica,
List<AbortedTransaction> abortedTransactions,
T records) {
this(error, highWatermark, lastStableOffset, logStartOffset, preferredReadReplica,
abortedTransactions, Optional.empty(), records);
}
public PartitionData(Errors error,
long highWatermark,
long lastStableOffset,
long logStartOffset,
List<AbortedTransaction> abortedTransactions,
T records) {
this(error, highWatermark, lastStableOffset, logStartOffset, Optional.empty(), abortedTransactions, records);
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
PartitionData that = (PartitionData) o;
return this.partitionResponse.equals(that.partitionResponse);
}
@Override
public int hashCode() {
return this.partitionResponse.hashCode();
}
@Override
public String toString() {
return "(error=" + error() +
", highWaterMark=" + highWatermark() +
", lastStableOffset = " + lastStableOffset() +
", logStartOffset = " + logStartOffset() +
", preferredReadReplica = " + preferredReadReplica().map(Object::toString).orElse("absent") +
", abortedTransactions = " + abortedTransactions() +
", divergingEpoch =" + divergingEpoch() +
", recordsSizeInBytes=" + records().sizeInBytes() + ")";
}
public Errors error() {
return error;
}
public long highWatermark() {
return partitionResponse.highWatermark();
}
public long lastStableOffset() {
return partitionResponse.lastStableOffset();
}
public long logStartOffset() {
return partitionResponse.logStartOffset();
}
public Optional<Integer> preferredReadReplica() {
return preferredReplica;
}
public List<AbortedTransaction> abortedTransactions() {
return abortedTransactions;
}
public Optional<FetchResponseData.EpochEndOffset> divergingEpoch() {
FetchResponseData.EpochEndOffset epochEndOffset = partitionResponse.divergingEpoch();
if (epochEndOffset.epoch() < 0) {
return Optional.empty();
} else {
return Optional.of(epochEndOffset);
}
}
@SuppressWarnings("unchecked")
public T records() {
return (T) partitionResponse.recordSet();
}
}
/**
* From version 3 or later, the entries in `responseData` should be in the same order as the entries in
* `FetchRequest.fetchData`.
*
* @param error The top-level error code.
* @param responseData The fetched data grouped by partition.
* @param throttleTimeMs The time in milliseconds that the response was throttled
* @param sessionId The fetch session id.
*/
public FetchResponse(Errors error,
LinkedHashMap<TopicPartition, PartitionData<T>> responseData,
int throttleTimeMs,
int sessionId) {
super(ApiKeys.FETCH);
this.data = toMessage(throttleTimeMs, error, responseData.entrySet().iterator(), sessionId);
this.responseDataMap = responseData;
}
public FetchResponse(FetchResponseData fetchResponseData) {
super(ApiKeys.FETCH);
this.data = fetchResponseData;
this.responseDataMap = toResponseDataMap(fetchResponseData);
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
public LinkedHashMap<TopicPartition, PartitionData<T>> responseData() {
return responseDataMap;
public LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData() {
if (responseData == null) {
synchronized (this) {
if (responseData == null) {
responseData = new LinkedHashMap<>();
data.responses().forEach(topicResponse ->
topicResponse.partitions().forEach(partition ->
responseData.put(new TopicPartition(topicResponse.topic(), partition.partitionIndex()), partition))
);
}
}
}
return responseData;
}
@Override
@ -304,58 +108,15 @@ public class FetchResponse<T extends BaseRecords> extends AbstractResponse {
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
updateErrorCounts(errorCounts, error());
responseDataMap.values().forEach(response ->
updateErrorCounts(errorCounts, response.error())
data.responses().forEach(topicResponse ->
topicResponse.partitions().forEach(partition ->
updateErrorCounts(errorCounts, Errors.forCode(partition.errorCode())))
);
return errorCounts;
}
public static FetchResponse<MemoryRecords> parse(ByteBuffer buffer, short version) {
return new FetchResponse<>(new FetchResponseData(new ByteBufferAccessor(buffer), version));
}
@SuppressWarnings("unchecked")
private static <T extends BaseRecords> LinkedHashMap<TopicPartition, PartitionData<T>> toResponseDataMap(
FetchResponseData message) {
LinkedHashMap<TopicPartition, PartitionData<T>> responseMap = new LinkedHashMap<>();
message.responses().forEach(topicResponse -> {
topicResponse.partitionResponses().forEach(partitionResponse -> {
TopicPartition tp = new TopicPartition(topicResponse.topic(), partitionResponse.partition());
PartitionData<T> partitionData = new PartitionData<>(partitionResponse);
responseMap.put(tp, partitionData);
});
});
return responseMap;
}
private static <T extends BaseRecords> FetchResponseData toMessage(int throttleTimeMs, Errors error,
Iterator<Map.Entry<TopicPartition, PartitionData<T>>> partIterator,
int sessionId) {
List<FetchResponseData.FetchableTopicResponse> topicResponseList = new ArrayList<>();
partIterator.forEachRemaining(entry -> {
PartitionData<T> partitionData = entry.getValue();
// Since PartitionData alone doesn't know the partition ID, we set it here
partitionData.partitionResponse.setPartition(entry.getKey().partition());
// We have to keep the order of input topic-partition. Hence, we batch the partitions only if the last
// batch is in the same topic group.
FetchResponseData.FetchableTopicResponse previousTopic = topicResponseList.isEmpty() ? null
: topicResponseList.get(topicResponseList.size() - 1);
if (previousTopic != null && previousTopic.topic().equals(entry.getKey().topic()))
previousTopic.partitionResponses().add(partitionData.partitionResponse);
else {
List<FetchResponseData.FetchablePartitionResponse> partitionResponses = new ArrayList<>();
partitionResponses.add(partitionData.partitionResponse);
topicResponseList.add(new FetchResponseData.FetchableTopicResponse()
.setTopic(entry.getKey().topic())
.setPartitionResponses(partitionResponses));
}
});
return new FetchResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(error.code())
.setSessionId(sessionId)
.setResponses(topicResponseList);
public static FetchResponse parse(ByteBuffer buffer, short version) {
return new FetchResponse(new FetchResponseData(new ByteBufferAccessor(buffer), version));
}
/**
@ -365,11 +126,11 @@ public class FetchResponse<T extends BaseRecords> extends AbstractResponse {
* @param partIterator The partition iterator.
* @return The response size in bytes.
*/
public static <T extends BaseRecords> int sizeOf(short version,
Iterator<Map.Entry<TopicPartition, PartitionData<T>>> partIterator) {
public static int sizeOf(short version,
Iterator<Map.Entry<TopicPartition, FetchResponseData.PartitionData>> partIterator) {
// Since the throttleTimeMs and metadata field sizes are constant and fixed, we can
// use arbitrary values here without affecting the result.
FetchResponseData data = toMessage(0, Errors.NONE, partIterator, INVALID_SESSION_ID);
FetchResponseData data = toMessage(Errors.NONE, 0, INVALID_SESSION_ID, partIterator);
ObjectSerializationCache cache = new ObjectSerializationCache();
return 4 + data.size(cache, version);
}
@ -378,4 +139,91 @@ public class FetchResponse<T extends BaseRecords> extends AbstractResponse {
public boolean shouldClientThrottle(short version) {
return version >= 8;
}
}
public static Optional<FetchResponseData.EpochEndOffset> divergingEpoch(FetchResponseData.PartitionData partitionResponse) {
return partitionResponse.divergingEpoch().epoch() < 0 ? Optional.empty()
: Optional.of(partitionResponse.divergingEpoch());
}
public static boolean isDivergingEpoch(FetchResponseData.PartitionData partitionResponse) {
return partitionResponse.divergingEpoch().epoch() >= 0;
}
public static Optional<Integer> preferredReadReplica(FetchResponseData.PartitionData partitionResponse) {
return partitionResponse.preferredReadReplica() == INVALID_PREFERRED_REPLICA_ID ? Optional.empty()
: Optional.of(partitionResponse.preferredReadReplica());
}
public static boolean isPreferredReplica(FetchResponseData.PartitionData partitionResponse) {
return partitionResponse.preferredReadReplica() != INVALID_PREFERRED_REPLICA_ID;
}
public static FetchResponseData.PartitionData partitionResponse(int partition, Errors error) {
return new FetchResponseData.PartitionData()
.setPartitionIndex(partition)
.setErrorCode(error.code())
.setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK);
}
/**
* Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`.
*
* If this response was deserialized after a fetch, this method should never fail. An example where this would
* fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and
* sent on the wire).
*
* @param partition partition data
 * @return Records, or empty records if the records in the PartitionData are null.
*/
public static Records recordsOrFail(FetchResponseData.PartitionData partition) {
if (partition.records() == null) return MemoryRecords.EMPTY;
if (partition.records() instanceof Records) return (Records) partition.records();
throw new ClassCastException("The record type is " + partition.records().getClass().getSimpleName() + ", which is not a subtype of " +
Records.class.getSimpleName() + ". This method is only safe to call if the `FetchResponse` was deserialized from bytes.");
}
/**
 * @return The size in bytes of the records, or 0 if the records of the input partition are null.
*/
public static int recordsSize(FetchResponseData.PartitionData partition) {
return partition.records() == null ? 0 : partition.records().sizeInBytes();
}
public static FetchResponse of(Errors error,
int throttleTimeMs,
int sessionId,
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData) {
return new FetchResponse(toMessage(error, throttleTimeMs, sessionId, responseData.entrySet().iterator()));
}
private static FetchResponseData toMessage(Errors error,
int throttleTimeMs,
int sessionId,
Iterator<Map.Entry<TopicPartition, FetchResponseData.PartitionData>> partIterator) {
List<FetchResponseData.FetchableTopicResponse> topicResponseList = new ArrayList<>();
partIterator.forEachRemaining(entry -> {
FetchResponseData.PartitionData partitionData = entry.getValue();
// Since PartitionData alone doesn't know the partition ID, we set it here
partitionData.setPartitionIndex(entry.getKey().partition());
// We have to keep the order of input topic-partition. Hence, we batch the partitions only if the last
// batch is in the same topic group.
FetchResponseData.FetchableTopicResponse previousTopic = topicResponseList.isEmpty() ? null
: topicResponseList.get(topicResponseList.size() - 1);
if (previousTopic != null && previousTopic.topic().equals(entry.getKey().topic()))
previousTopic.partitions().add(partitionData);
else {
List<FetchResponseData.PartitionData> partitionResponses = new ArrayList<>();
partitionResponses.add(partitionData);
topicResponseList.add(new FetchResponseData.FetchableTopicResponse()
.setTopic(entry.getKey().topic())
.setPartitions(partitionResponses));
}
});
return new FetchResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(error.code())
.setSessionId(sessionId)
.setResponses(topicResponseList);
}
}
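
As a hedged usage note for the static partitionResponse and of helpers defined above, this sketch mirrors the way FetchRequest.getErrorResponse builds a per-partition error response in this commit; the method name and parameters here are illustrative:

import java.util.Collection;
import java.util.LinkedHashMap;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchResponse;

public class ErrorResponseSketch {
    // Every requested partition gets a minimal PartitionData that carries only its index,
    // the error code, and an invalid high watermark, as produced by FetchResponse.partitionResponse.
    static FetchResponse errorResponse(Collection<TopicPartition> requestedPartitions,
                                       Errors error, int throttleTimeMs, int sessionId) {
        LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
        for (TopicPartition tp : requestedPartitions)
            responseData.put(tp, FetchResponse.partitionResponse(tp.partition(), error));
        return FetchResponse.of(error, throttleTimeMs, sessionId, responseData);
    }
}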


@ -53,9 +53,9 @@
"about": "The response topics.", "fields": [
{ "name": "Topic", "type": "string", "versions": "0+", "entityType": "topicName",
"about": "The topic name." },
{ "name": "PartitionResponses", "type": "[]FetchablePartitionResponse", "versions": "0+",
{ "name": "Partitions", "type": "[]PartitionData", "versions": "0+",
"about": "The topic partitions.", "fields": [
{ "name": "Partition", "type": "int32", "versions": "0+",
{ "name": "PartitionIndex", "type": "int32", "versions": "0+",
"about": "The partition index." },
{ "name": "ErrorCode", "type": "int16", "versions": "0+",
"about": "The error code, or 0 if there was no fetch error." },
@ -94,7 +94,7 @@
]},
{ "name": "PreferredReadReplica", "type": "int32", "versions": "11+", "default": "-1", "ignorable": false,
"about": "The preferred read replica for the consumer to use on its next fetch request"},
{ "name": "RecordSet", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}
{ "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}
]}
]}
]


@ -17,8 +17,8 @@
package org.apache.kafka.clients;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.LogContext;
@ -150,22 +150,21 @@ public class FetchSessionHandlerTest {
private static final class RespEntry {
final TopicPartition part;
final FetchResponse.PartitionData<MemoryRecords> data;
final FetchResponseData.PartitionData data;
RespEntry(String topic, int partition, long highWatermark, long lastStableOffset) {
this.part = new TopicPartition(topic, partition);
this.data = new FetchResponse.PartitionData<>(
Errors.NONE,
highWatermark,
lastStableOffset,
0,
null,
null);
this.data = new FetchResponseData.PartitionData()
.setPartitionIndex(partition)
.setHighWatermark(highWatermark)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(0);
}
}
private static LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> respMap(RespEntry... entries) {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> map = new LinkedHashMap<>();
private static LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> respMap(RespEntry... entries) {
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> map = new LinkedHashMap<>();
for (RespEntry entry : entries) {
map.put(entry.part, entry.data);
}
@ -191,10 +190,10 @@ public class FetchSessionHandlerTest {
assertEquals(INVALID_SESSION_ID, data.metadata().sessionId());
assertEquals(INITIAL_EPOCH, data.metadata().epoch());
FetchResponse<MemoryRecords> resp = new FetchResponse<>(Errors.NONE,
FetchResponse resp = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID,
respMap(new RespEntry("foo", 0, 0, 0),
new RespEntry("foo", 1, 0, 0)),
0, INVALID_SESSION_ID);
new RespEntry("foo", 1, 0, 0))
);
handler.handleResponse(resp);
FetchSessionHandler.Builder builder2 = handler.newBuilder();
@ -225,10 +224,9 @@ public class FetchSessionHandlerTest {
assertEquals(INVALID_SESSION_ID, data.metadata().sessionId());
assertEquals(INITIAL_EPOCH, data.metadata().epoch());
FetchResponse<MemoryRecords> resp = new FetchResponse<>(Errors.NONE,
FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123,
respMap(new RespEntry("foo", 0, 10, 20),
new RespEntry("foo", 1, 10, 20)),
0, 123);
new RespEntry("foo", 1, 10, 20)));
handler.handleResponse(resp);
// Test an incremental fetch request which adds one partition and modifies another.
@ -249,15 +247,14 @@ public class FetchSessionHandlerTest {
new ReqEntry("foo", 1, 10, 120, 210)),
data2.toSend());
FetchResponse<MemoryRecords> resp2 = new FetchResponse<>(Errors.NONE,
respMap(new RespEntry("foo", 1, 20, 20)),
0, 123);
FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123,
respMap(new RespEntry("foo", 1, 20, 20)));
handler.handleResponse(resp2);
// Skip building a new request. Test that handling an invalid fetch session epoch response results
// in a request which closes the session.
FetchResponse<MemoryRecords> resp3 = new FetchResponse<>(Errors.INVALID_FETCH_SESSION_EPOCH, respMap(),
0, INVALID_SESSION_ID);
FetchResponse resp3 = FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH,
0, INVALID_SESSION_ID, respMap());
handler.handleResponse(resp3);
FetchSessionHandler.Builder builder4 = handler.newBuilder();
@ -312,11 +309,10 @@ public class FetchSessionHandlerTest {
data.toSend(), data.sessionPartitions());
assertTrue(data.metadata().isFull());
FetchResponse<MemoryRecords> resp = new FetchResponse<>(Errors.NONE,
FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123,
respMap(new RespEntry("foo", 0, 10, 20),
new RespEntry("foo", 1, 10, 20),
new RespEntry("bar", 0, 10, 20)),
0, 123);
new RespEntry("bar", 0, 10, 20)));
handler.handleResponse(resp);
// Test an incremental fetch request which removes two partitions.
@ -337,8 +333,8 @@ public class FetchSessionHandlerTest {
// A FETCH_SESSION_ID_NOT_FOUND response triggers us to close the session.
// The next request is a session establishing FULL request.
FetchResponse<MemoryRecords> resp2 = new FetchResponse<>(Errors.FETCH_SESSION_ID_NOT_FOUND,
respMap(), 0, INVALID_SESSION_ID);
FetchResponse resp2 = FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND,
0, INVALID_SESSION_ID, respMap());
handler.handleResponse(resp2);
FetchSessionHandler.Builder builder3 = handler.newBuilder();
builder3.add(new TopicPartition("foo", 0),
@ -354,11 +350,10 @@ public class FetchSessionHandlerTest {
@Test
public void testVerifyFullFetchResponsePartitions() throws Exception {
FetchSessionHandler handler = new FetchSessionHandler(LOG_CONTEXT, 1);
String issue = handler.verifyFullFetchResponsePartitions(new FetchResponse<>(Errors.NONE,
String issue = handler.verifyFullFetchResponsePartitions(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID,
respMap(new RespEntry("foo", 0, 10, 20),
new RespEntry("foo", 1, 10, 20),
new RespEntry("bar", 0, 10, 20)),
0, INVALID_SESSION_ID));
new RespEntry("bar", 0, 10, 20))));
assertTrue(issue.contains("extra"));
assertFalse(issue.contains("omitted"));
FetchSessionHandler.Builder builder = handler.newBuilder();
@ -369,16 +364,14 @@ public class FetchSessionHandlerTest {
builder.add(new TopicPartition("bar", 0),
new FetchRequest.PartitionData(20, 120, 220, Optional.empty()));
builder.build();
String issue2 = handler.verifyFullFetchResponsePartitions(new FetchResponse<>(Errors.NONE,
String issue2 = handler.verifyFullFetchResponsePartitions(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID,
respMap(new RespEntry("foo", 0, 10, 20),
new RespEntry("foo", 1, 10, 20),
new RespEntry("bar", 0, 10, 20)),
0, INVALID_SESSION_ID));
new RespEntry("bar", 0, 10, 20))));
assertTrue(issue2 == null);
String issue3 = handler.verifyFullFetchResponsePartitions(new FetchResponse<>(Errors.NONE,
String issue3 = handler.verifyFullFetchResponsePartitions(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID,
respMap(new RespEntry("foo", 0, 10, 20),
new RespEntry("foo", 1, 10, 20)),
0, INVALID_SESSION_ID));
new RespEntry("foo", 1, 10, 20))));
assertFalse(issue3.contains("extra"));
assertTrue(issue3.contains("omitted"));
}


@ -45,18 +45,20 @@ import org.apache.kafka.common.errors.InvalidGroupIdException;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.message.JoinGroupRequestData;
import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.network.Selectable;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
@ -91,8 +93,6 @@ import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.test.MockConsumerInterceptor;
import org.apache.kafka.test.MockMetricsReporter;
import org.apache.kafka.test.TestUtils;
import org.apache.kafka.common.metrics.stats.Avg;
import org.junit.jupiter.api.Test;
import javax.management.MBeanServer;
@ -2276,8 +2276,8 @@ public class KafkaConsumerTest {
return new ListOffsetsResponse(data);
}
private FetchResponse<MemoryRecords> fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> tpResponses = new LinkedHashMap<>();
private FetchResponse fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> tpResponses = new LinkedHashMap<>();
for (Map.Entry<TopicPartition, FetchInfo> fetchEntry : fetches.entrySet()) {
TopicPartition partition = fetchEntry.getKey();
long fetchOffset = fetchEntry.getValue().offset;
@ -2294,14 +2294,17 @@ public class KafkaConsumerTest {
builder.append(0L, ("key-" + i).getBytes(), ("value-" + i).getBytes());
records = builder.build();
}
tpResponses.put(partition, new FetchResponse.PartitionData<>(
Errors.NONE, highWatermark, FetchResponse.INVALID_LAST_STABLE_OFFSET,
logStartOffset, null, records));
tpResponses.put(partition,
new FetchResponseData.PartitionData()
.setPartitionIndex(partition.partition())
.setHighWatermark(highWatermark)
.setLogStartOffset(logStartOffset)
.setRecords(records));
}
return new FetchResponse<>(Errors.NONE, tpResponses, 0, INVALID_SESSION_ID);
return FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpResponses);
}
private FetchResponse<MemoryRecords> fetchResponse(TopicPartition partition, long fetchOffset, int count) {
private FetchResponse fetchResponse(TopicPartition partition, long fetchOffset, int count) {
FetchInfo fetchInfo = new FetchInfo(fetchOffset, count);
return fetchResponse(Collections.singletonMap(partition, fetchInfo));
}


@ -48,6 +48,7 @@ import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.message.ApiMessageType;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic;
@ -1270,13 +1271,18 @@ public class FetcherTest {
assertEquals(1, fetcher.sendFetches());
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new LinkedHashMap<>();
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions),
0, INVALID_SESSION_ID));
Map<TopicPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
partitions.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setHighWatermark(100)
.setRecords(records));
partitions.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100));
client.prepareResponse(FetchResponse.of(Errors.NONE,
0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>();
@ -1316,17 +1322,29 @@ public class FetcherTest {
assertEquals(1, fetcher.sendFetches());
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new LinkedHashMap<>();
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET,
FetchResponse.INVALID_LOG_START_OFFSET, null, records));
partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
partitions.put(tp2, new FetchResponse.PartitionData<>(Errors.NONE, 100L, 4,
0L, null, nextRecords));
partitions.put(tp3, new FetchResponse.PartitionData<>(Errors.NONE, 100L, 4,
0L, null, partialRecords));
client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions),
0, INVALID_SESSION_ID));
Map<TopicPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
partitions.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setHighWatermark(100)
.setRecords(records));
partitions.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100));
partitions.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition())
.setHighWatermark(100)
.setLastStableOffset(4)
.setLogStartOffset(0)
.setRecords(nextRecords));
partitions.put(tp3, new FetchResponseData.PartitionData()
.setPartitionIndex(tp3.partition())
.setHighWatermark(100)
.setLastStableOffset(4)
.setLogStartOffset(0)
.setRecords(partialRecords));
client.prepareResponse(FetchResponse.of(Errors.NONE,
0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
@ -1384,9 +1402,11 @@ public class FetcherTest {
assignFromUser(Utils.mkSet(tp0));
subscriptions.seek(tp0, 1);
assertEquals(1, fetcher.sendFetches());
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>();
partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), null, records));
Map<TopicPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
partitions.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(100)
.setRecords(records));
client.prepareResponse(fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
@ -1397,9 +1417,11 @@ public class FetcherTest {
assertEquals(1, fetcher.sendFetches());
partitions = new HashMap<>();
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), null, MemoryRecords.EMPTY));
client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
partitions.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100));
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
assertEquals(1, fetcher.fetchedRecords().get(tp0).size());
@ -2094,7 +2116,7 @@ public class FetcherTest {
ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
client.send(request, time.milliseconds());
client.poll(1, time.milliseconds());
FetchResponse<MemoryRecords> response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs);
FetchResponse response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs);
buffer = RequestTestUtils.serializeResponseWithHeader(response, ApiKeys.FETCH.latestVersion(), request.correlationId());
selector.completeReceive(new NetworkReceive(node.idString(), buffer));
client.poll(1, time.milliseconds());
@ -2256,7 +2278,7 @@ public class FetcherTest {
client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, partitionCounts, tp -> validLeaderEpoch));
int expectedBytes = 0;
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> fetchPartitionData = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();
for (TopicPartition tp : Utils.mkSet(tp1, tp2)) {
subscriptions.seek(tp, 0);
@ -2269,12 +2291,15 @@ public class FetcherTest {
for (Record record : records.records())
expectedBytes += record.sizeInBytes();
fetchPartitionData.put(tp, new FetchResponse.PartitionData<>(Errors.NONE, 15L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
fetchPartitionData.put(tp, new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setHighWatermark(15)
.setLogStartOffset(0)
.setRecords(records));
}
assertEquals(1, fetcher.sendFetches());
client.prepareResponse(new FetchResponse<>(Errors.NONE, fetchPartitionData, 0, INVALID_SESSION_ID));
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
consumerClient.poll(time.timer(0));
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
@ -2333,15 +2358,21 @@ public class FetcherTest {
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>();
partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY));
Map<TopicPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
partitions.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(100)
.setLogStartOffset(0)
.setRecords(records));
partitions.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
.setHighWatermark(100)
.setLogStartOffset(0));
assertEquals(1, fetcher.sendFetches());
client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions),
0, INVALID_SESSION_ID));
client.prepareResponse(FetchResponse.of(Errors.NONE,
0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
fetcher.fetchedRecords();
@ -2375,15 +2406,19 @@ public class FetcherTest {
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>();
partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null,
MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));
Map<TopicPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
partitions.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(100)
.setLogStartOffset(0)
.setRecords(records));
partitions.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setHighWatermark(100)
.setLogStartOffset(0)
.setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));
client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions),
0, INVALID_SESSION_ID));
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
consumerClient.poll(time.timer(0));
fetcher.fetchedRecords();
@ -2778,8 +2813,8 @@ public class FetcherTest {
buffer.flip();
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
@ -2811,7 +2846,6 @@ public class FetcherTest {
commitTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
@ -2824,7 +2858,7 @@ public class FetcherTest {
FetchRequest request = (FetchRequest) body;
assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
return true;
}, fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
}, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
@ -2840,7 +2874,7 @@ public class FetcherTest {
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
List<FetchResponseData.AbortedTransaction> abortedTransactions = new ArrayList<>();
long pid1 = 1L;
long pid2 = 2L;
@ -2863,7 +2897,7 @@ public class FetcherTest {
// abort producer 2
abortTransaction(buffer, pid2, 5L);
abortedTransactions.add(new FetchResponse.AbortedTransaction(pid2, 2L));
abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(2L));
// New transaction for producer 1 (eventually aborted)
appendTransactionalRecords(buffer, pid1, 6L,
@ -2879,7 +2913,7 @@ public class FetcherTest {
// abort producer 1
abortTransaction(buffer, pid1, 9L);
abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 6));
abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(6));
// commit producer 2
commitTransaction(buffer, pid2, 10L);
@ -2931,8 +2965,9 @@ public class FetcherTest {
commitTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0)
);
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
@ -2983,8 +3018,8 @@ public class FetcherTest {
assertEquals(1, fetcher.sendFetches());
// prepare the response. the aborted transactions begin at offsets which are no longer in the log
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
abortedTransactions.add(new FetchResponse.AbortedTransaction(producerId, 0L));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(producerId).setFirstOffset(0L));
client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer),
abortedTransactions, Errors.NONE, 100L, 100L, 0));
@ -3120,9 +3155,10 @@ public class FetcherTest {
assertEquals(1, fetcher.sendFetches());
// prepare the response. the aborted transactions begin at offsets which are no longer in the log
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
abortedTransactions.add(new FetchResponse.AbortedTransaction(pid2, 6L));
abortedTransactions.add(new FetchResponse.AbortedTransaction(pid1, 0L));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Arrays.asList(
new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(6),
new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(0)
);
client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer),
abortedTransactions, Errors.NONE, 100L, 100L, 0));
@ -3151,8 +3187,8 @@ public class FetcherTest {
buffer.flip();
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
@ -3184,8 +3220,8 @@ public class FetcherTest {
currentOffset += abortTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
@ -3216,12 +3252,19 @@ public class FetcherTest {
subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
// Fetch some records and establish an incremental fetch session.
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions1 = new LinkedHashMap<>();
partitions1.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 2L,
2, 0L, null, this.records));
partitions1.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, emptyRecords));
FetchResponse<MemoryRecords> resp1 = new FetchResponse<>(Errors.NONE, partitions1, 0, 123);
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> partitions1 = new LinkedHashMap<>();
partitions1.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(2)
.setLastStableOffset(2)
.setLogStartOffset(0)
.setRecords(this.records));
partitions1.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition())
.setHighWatermark(100)
.setLogStartOffset(0)
.setRecords(emptyRecords));
FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1);
client.prepareResponse(resp1);
assertEquals(1, fetcher.sendFetches());
assertFalse(fetcher.hasCompletedFetches());
@ -3246,8 +3289,8 @@ public class FetcherTest {
assertEquals(4L, subscriptions.position(tp0).offset);
// The second response contains no new records.
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions2 = new LinkedHashMap<>();
FetchResponse<MemoryRecords> resp2 = new FetchResponse<>(Errors.NONE, partitions2, 0, 123);
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> partitions2 = new LinkedHashMap<>();
FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2);
client.prepareResponse(resp2);
assertEquals(1, fetcher.sendFetches());
consumerClient.poll(time.timer(0));
@ -3257,10 +3300,14 @@ public class FetcherTest {
assertEquals(1L, subscriptions.position(tp1).offset);
// The third response contains some new records for tp0.
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions3 = new LinkedHashMap<>();
partitions3.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100L,
4, 0L, null, this.nextRecords));
FetchResponse<MemoryRecords> resp3 = new FetchResponse<>(Errors.NONE, partitions3, 0, 123);
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> partitions3 = new LinkedHashMap<>();
partitions3.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(100)
.setLastStableOffset(4)
.setLogStartOffset(0)
.setRecords(this.nextRecords));
FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3);
client.prepareResponse(resp3);
assertEquals(1, fetcher.sendFetches());
consumerClient.poll(time.timer(0));
@ -3319,7 +3366,7 @@ public class FetcherTest {
}
@Override
public boolean handleResponse(FetchResponse<?> response) {
public boolean handleResponse(FetchResponse response) {
verifySessionPartitions();
return handler.handleResponse(response);
}
@ -3367,14 +3414,18 @@ public class FetcherTest {
if (!client.requests().isEmpty()) {
ClientRequest request = client.requests().peek();
FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseMap = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
for (Map.Entry<TopicPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData().entrySet()) {
TopicPartition tp = entry.getKey();
long offset = entry.getValue().fetchOffset;
responseMap.put(tp, new FetchResponse.PartitionData<>(Errors.NONE, offset + 2L, offset + 2,
0L, null, buildRecords(offset, 2, offset)));
responseMap.put(tp, new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setHighWatermark(offset + 2)
.setLastStableOffset(offset + 2)
.setLogStartOffset(0)
.setRecords(buildRecords(offset, 2, offset)));
}
client.respondToRequest(request, new FetchResponse<>(Errors.NONE, responseMap, 0, 123));
client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
consumerClient.poll(time.timer(0));
}
}
@ -3429,11 +3480,15 @@ public class FetcherTest {
assertTrue(epoch == 0 || epoch == nextEpoch,
String.format("Unexpected epoch expected %d got %d", nextEpoch, epoch));
nextEpoch++;
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseMap = new LinkedHashMap<>();
responseMap.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, nextOffset + 2L, nextOffset + 2,
0L, null, buildRecords(nextOffset, 2, nextOffset)));
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
responseMap.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setHighWatermark(nextOffset + 2)
.setLastStableOffset(nextOffset + 2)
.setLogStartOffset(0)
.setRecords(buildRecords(nextOffset, 2, nextOffset)));
nextOffset += 2;
client.respondToRequest(request, new FetchResponse<>(Errors.NONE, responseMap, 0, 123));
client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
consumerClient.poll(time.timer(0));
}
}
@ -3483,7 +3538,6 @@ public class FetcherTest {
commitTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
@ -3496,7 +3550,7 @@ public class FetcherTest {
FetchRequest request = (FetchRequest) body;
assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
return true;
}, fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
}, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
@ -4463,41 +4517,66 @@ public class FetcherTest {
return new ListOffsetsResponse(data);
}
private FetchResponse<MemoryRecords> fullFetchResponseWithAbortedTransactions(MemoryRecords records,
List<FetchResponse.AbortedTransaction> abortedTransactions,
private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords records,
List<FetchResponseData.AbortedTransaction> abortedTransactions,
Errors error,
long lastStableOffset,
long hw,
int throttleTime) {
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = Collections.singletonMap(tp0,
new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L, abortedTransactions, records));
return new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID);
Map<TopicPartition, FetchResponseData.PartitionData> partitions = Collections.singletonMap(tp0,
new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition())
.setErrorCode(error.code())
.setHighWatermark(hw)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(0)
.setAbortedTransactions(abortedTransactions)
.setRecords(records));
return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions));
}
private FetchResponse<MemoryRecords> fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw, int throttleTime) {
private FetchResponse fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw, int throttleTime) {
return fullFetchResponse(tp, records, error, hw, FetchResponse.INVALID_LAST_STABLE_OFFSET, throttleTime);
}
private FetchResponse<MemoryRecords> fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw,
private FetchResponse fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw,
long lastStableOffset, int throttleTime) {
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = Collections.singletonMap(tp,
new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L, null, records));
return new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID);
Map<TopicPartition, FetchResponseData.PartitionData> partitions = Collections.singletonMap(tp,
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setErrorCode(error.code())
.setHighWatermark(hw)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(0)
.setRecords(records));
return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions));
}
private FetchResponse<MemoryRecords> fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw,
private FetchResponse fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw,
long lastStableOffset, int throttleTime, Optional<Integer> preferredReplicaId) {
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = Collections.singletonMap(tp,
new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L,
preferredReplicaId, null, records));
return new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID);
Map<TopicPartition, FetchResponseData.PartitionData> partitions = Collections.singletonMap(tp,
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setErrorCode(error.code())
.setHighWatermark(hw)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(0)
.setRecords(records)
.setPreferredReadReplica(preferredReplicaId.orElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID)));
return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions));
}
private FetchResponse<MemoryRecords> fetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw,
private FetchResponse fetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw,
long lastStableOffset, long logStartOffset, int throttleTime) {
Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = Collections.singletonMap(tp,
new FetchResponse.PartitionData<>(error, hw, lastStableOffset, logStartOffset, null, records));
return new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID);
Map<TopicPartition, FetchResponseData.PartitionData> partitions = Collections.singletonMap(tp,
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setErrorCode(error.code())
.setHighWatermark(hw)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(logStartOffset)
.setRecords(records));
return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions));
}
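The rewritten test helpers above all follow the same construction pattern; as a minimal, self-contained illustration (the topic name, offsets, throttle time and session id below are placeholders, only FetchResponse.of and the generated FetchResponseData.PartitionData setters come from this change):

import java.util.LinkedHashMap;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.requests.FetchResponse;

public class FetchResponseOfExample {
    public static FetchResponse singlePartitionResponse() {
        // Per-partition data is the generated FetchResponseData.PartitionData,
        // populated through setters instead of the removed multi-argument constructors.
        LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
        partitions.put(new TopicPartition("example-topic", 0),
            new FetchResponseData.PartitionData()
                .setPartitionIndex(0)
                .setHighWatermark(100L)
                .setLastStableOffset(100L)
                .setLogStartOffset(0L)
                .setRecords(MemoryRecords.EMPTY));
        // The top-level response is built with the static factory instead of a constructor.
        return FetchResponse.of(Errors.NONE, 0, 42, partitions);
    }
}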
private MetadataResponse newMetadataResponse(String topic, Errors error) {


@ -803,15 +803,18 @@ public class RequestResponseTest {
@Test
public void fetchResponseVersionTest() {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(
Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET,
0L, Optional.empty(), Collections.emptyList(), records));
responseData.put(new TopicPartition("test", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(1000000)
.setLogStartOffset(0)
.setRecords(records));
FetchResponse<MemoryRecords> v0Response = new FetchResponse<>(Errors.NONE, responseData, 0, INVALID_SESSION_ID);
FetchResponse<MemoryRecords> v1Response = new FetchResponse<>(Errors.NONE, responseData, 10, INVALID_SESSION_ID);
FetchResponse v0Response = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, responseData);
FetchResponse v1Response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData);
assertEquals(0, v0Response.throttleTimeMs(), "Throttle time must be zero");
assertEquals(10, v1Response.throttleTimeMs(), "Throttle time must be 10");
assertEquals(responseData, v0Response.responseData(), "Response data does not match");
@ -820,22 +823,34 @@ public class RequestResponseTest {
@Test
public void testFetchResponseV4() {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
List<FetchResponse.AbortedTransaction> abortedTransactions = asList(
new FetchResponse.AbortedTransaction(10, 100),
new FetchResponse.AbortedTransaction(15, 50)
List<FetchResponseData.AbortedTransaction> abortedTransactions = asList(
new FetchResponseData.AbortedTransaction().setProducerId(10).setFirstOffset(100),
new FetchResponseData.AbortedTransaction().setProducerId(15).setFirstOffset(50)
);
responseData.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData<>(Errors.NONE, 100000,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), abortedTransactions, records));
responseData.put(new TopicPartition("bar", 1), new FetchResponse.PartitionData<>(Errors.NONE, 900000,
5, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), null, records));
responseData.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData<>(Errors.NONE, 70000,
6, FetchResponse.INVALID_LOG_START_OFFSET, Optional.empty(), emptyList(), records));
responseData.put(new TopicPartition("bar", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(1000000)
.setAbortedTransactions(abortedTransactions)
.setRecords(records));
responseData.put(new TopicPartition("bar", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(900000)
.setLastStableOffset(5)
.setRecords(records));
responseData.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(70000)
.setLastStableOffset(6)
.setRecords(records));
FetchResponse<MemoryRecords> response = new FetchResponse<>(Errors.NONE, responseData, 10, INVALID_SESSION_ID);
FetchResponse<MemoryRecords> deserialized = FetchResponse.parse(response.serialize((short) 4), (short) 4);
FetchResponse response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData);
FetchResponse deserialized = FetchResponse.parse(response.serialize((short) 4), (short) 4);
assertEquals(responseData, deserialized.responseData());
}
@ -849,7 +864,7 @@ public class RequestResponseTest {
}
}
private void verifyFetchResponseFullWrite(short apiVersion, FetchResponse<MemoryRecords> fetchResponse) throws Exception {
private void verifyFetchResponseFullWrite(short apiVersion, FetchResponse fetchResponse) throws Exception {
int correlationId = 15;
short responseHeaderVersion = FETCH.responseHeaderVersion(apiVersion);
@ -1158,38 +1173,49 @@ public class RequestResponseTest {
return FetchRequest.Builder.forConsumer(100, 100000, fetchData).setMaxBytes(1000).build((short) version);
}
private FetchResponse<MemoryRecords> createFetchResponse(Errors error, int sessionId) {
return new FetchResponse<>(error, new LinkedHashMap<>(), 25, sessionId);
private FetchResponse createFetchResponse(Errors error, int sessionId) {
return FetchResponse.of(error, 25, sessionId, new LinkedHashMap<>());
}
private FetchResponse<MemoryRecords> createFetchResponse(int sessionId) {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
private FetchResponse createFetchResponse(int sessionId) {
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), Collections.emptyList(), records));
List<FetchResponse.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponse.AbortedTransaction(234L, 999L));
responseData.put(new TopicPartition("test", 1), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), abortedTransactions, MemoryRecords.EMPTY));
return new FetchResponse<>(Errors.NONE, responseData, 25, sessionId);
responseData.put(new TopicPartition("test", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(1000000)
.setLogStartOffset(0)
.setRecords(records));
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(234L).setFirstOffset(999L));
responseData.put(new TopicPartition("test", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(1000000)
.setLogStartOffset(0)
.setAbortedTransactions(abortedTransactions));
return FetchResponse.of(Errors.NONE, 25, sessionId, responseData);
}
private FetchResponse<MemoryRecords> createFetchResponse(boolean includeAborted) {
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>();
private FetchResponse createFetchResponse(boolean includeAborted) {
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
responseData.put(new TopicPartition("test", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(1000000)
.setLogStartOffset(0)
.setRecords(records));
responseData.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), Collections.emptyList(), records));
List<FetchResponse.AbortedTransaction> abortedTransactions = Collections.emptyList();
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.emptyList();
if (includeAborted) {
abortedTransactions = Collections.singletonList(
new FetchResponse.AbortedTransaction(234L, 999L));
new FetchResponseData.AbortedTransaction().setProducerId(234L).setFirstOffset(999L));
}
responseData.put(new TopicPartition("test", 1), new FetchResponse.PartitionData<>(Errors.NONE,
1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, Optional.empty(), abortedTransactions, MemoryRecords.EMPTY));
responseData.put(new TopicPartition("test", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(1000000)
.setLogStartOffset(0)
.setAbortedTransactions(abortedTransactions));
return new FetchResponse<>(Errors.NONE, responseData, 25, INVALID_SESSION_ID);
return FetchResponse.of(Errors.NONE, 25, INVALID_SESSION_ID, responseData);
}
private HeartbeatRequest createHeartBeatRequest() {


@ -40,7 +40,6 @@ import org.apache.kafka.common.errors._
import org.apache.kafka.common.message.{DescribeProducersResponseData, FetchResponseData}
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.requests.ListOffsetsRequest
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET
import org.apache.kafka.common.requests.ProduceResponse.RecordError
@ -1572,7 +1571,7 @@ class Log(@volatile private var _dir: File,
private def emptyFetchDataInfo(fetchOffsetMetadata: LogOffsetMetadata,
includeAbortedTxns: Boolean): FetchDataInfo = {
val abortedTransactions =
if (includeAbortedTxns) Some(List.empty[AbortedTransaction])
if (includeAbortedTxns) Some(List.empty[FetchResponseData.AbortedTransaction])
else None
FetchDataInfo(fetchOffsetMetadata,
MemoryRecords.EMPTY,
@ -1676,7 +1675,7 @@ class Log(@volatile private var _dir: File,
logEndOffset
}
val abortedTransactions = ListBuffer.empty[AbortedTransaction]
val abortedTransactions = ListBuffer.empty[FetchResponseData.AbortedTransaction]
def accumulator(abortedTxns: List[AbortedTxn]): Unit = abortedTransactions ++= abortedTxns.map(_.asAbortedTransaction)
collectAbortedTransactions(startOffset, upperBoundOffset, segmentEntry, accumulator)


@ -20,10 +20,9 @@ import java.io.{File, IOException}
import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.nio.file.{Files, StandardOpenOption}
import kafka.utils.{Logging, nonthreadsafe}
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.utils.Utils
import scala.collection.mutable.ListBuffer
@ -245,7 +244,9 @@ private[log] class AbortedTxn(val buffer: ByteBuffer) {
def lastStableOffset: Long = buffer.getLong(LastStableOffsetOffset)
def asAbortedTransaction: AbortedTransaction = new AbortedTransaction(producerId, firstOffset)
def asAbortedTransaction: FetchResponseData.AbortedTransaction = new FetchResponseData.AbortedTransaction()
.setProducerId(producerId)
.setFirstOffset(firstOffset)
override def toString: String =
s"AbortedTxn(version=$version, producerId=$producerId, firstOffset=$firstOffset, " +


@ -192,7 +192,7 @@ object RequestChannel extends Logging {
resources.add(newResource)
}
val data = new IncrementalAlterConfigsRequestData()
.setValidateOnly(alterConfigs.data().validateOnly())
.setValidateOnly(alterConfigs.data.validateOnly())
.setResources(resources)
new IncrementalAlterConfigsRequest.Builder(data).build(alterConfigs.version)


@ -135,7 +135,7 @@ object RequestConvertToJson {
case res: EndQuorumEpochResponse => EndQuorumEpochResponseDataJsonConverter.write(res.data, version)
case res: EnvelopeResponse => EnvelopeResponseDataJsonConverter.write(res.data, version)
case res: ExpireDelegationTokenResponse => ExpireDelegationTokenResponseDataJsonConverter.write(res.data, version)
case res: FetchResponse[_] => FetchResponseDataJsonConverter.write(res.data, version, false)
case res: FetchResponse => FetchResponseDataJsonConverter.write(res.data, version, false)
case res: FindCoordinatorResponse => FindCoordinatorResponseDataJsonConverter.write(res.data, version)
case res: HeartbeatResponse => HeartbeatResponseDataJsonConverter.write(res.data, version)
case res: IncrementalAlterConfigsResponse => IncrementalAlterConfigsResponseDataJsonConverter.write(res.data, version)


@ -17,37 +17,33 @@
package kafka.server
import kafka.cluster.BrokerEndPoint
import kafka.common.ClientIdAndBroker
import kafka.log.LogAppendInfo
import kafka.metrics.KafkaMetricsGroup
import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions}
import kafka.utils.CoreUtils.inLock
import kafka.utils.Implicits._
import kafka.utils.{DelayedItem, Pool, ShutdownableThread}
import org.apache.kafka.common.errors._
import org.apache.kafka.common.internals.PartitionStates
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData}
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.{FileRecords, MemoryRecords, Records}
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.{InvalidRecordException, TopicPartition}
import java.nio.ByteBuffer
import java.util
import java.util.Optional
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.locks.ReentrantLock
import kafka.cluster.BrokerEndPoint
import kafka.utils.{DelayedItem, Pool, ShutdownableThread}
import kafka.utils.Implicits._
import org.apache.kafka.common.errors._
import kafka.common.ClientIdAndBroker
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.CoreUtils.inLock
import org.apache.kafka.common.protocol.Errors
import scala.collection.{Map, Set, mutable}
import scala.compat.java8.OptionConverters._
import scala.jdk.CollectionConverters._
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import kafka.log.LogAppendInfo
import kafka.server.AbstractFetcherThread.ReplicaFetch
import kafka.server.AbstractFetcherThread.ResultWithPartitions
import org.apache.kafka.common.{InvalidRecordException, TopicPartition}
import org.apache.kafka.common.internals.PartitionStates
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.record.{FileRecords, MemoryRecords, Records}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import scala.math._
/**
@ -62,7 +58,7 @@ abstract class AbstractFetcherThread(name: String,
val brokerTopicStats: BrokerTopicStats) //BrokerTopicStats's lifecycle managed by ReplicaManager
extends ShutdownableThread(name, isInterruptible) {
type FetchData = FetchResponse.PartitionData[Records]
type FetchData = FetchResponseData.PartitionData
type EpochData = OffsetForLeaderEpochRequestData.OffsetForLeaderPartition
private val partitionStates = new PartitionStates[PartitionFetchState]
@ -340,7 +336,7 @@ abstract class AbstractFetcherThread(name: String,
// the current offset is the same as the offset requested.
val fetchPartitionData = sessionPartitions.get(topicPartition)
if (fetchPartitionData != null && fetchPartitionData.fetchOffset == currentFetchState.fetchOffset && currentFetchState.isReadyForFetch) {
partitionData.error match {
Errors.forCode(partitionData.errorCode) match {
case Errors.NONE =>
try {
// Once we hand off the partition data to the subclass, we can't mess with it any more in this thread
@ -364,7 +360,7 @@ abstract class AbstractFetcherThread(name: String,
}
}
if (isTruncationOnFetchSupported) {
partitionData.divergingEpoch.ifPresent { divergingEpoch =>
FetchResponse.divergingEpoch(partitionData).ifPresent { divergingEpoch =>
divergingEndOffsets += topicPartition -> new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(Errors.NONE.code)
@ -416,9 +412,8 @@ abstract class AbstractFetcherThread(name: String,
"expected to persist.")
partitionsWithError += topicPartition
case _ =>
error(s"Error for partition $topicPartition at offset ${currentFetchState.fetchOffset}",
partitionData.error.exception)
case partitionError =>
error(s"Error for partition $topicPartition at offset ${currentFetchState.fetchOffset}", partitionError.exception)
partitionsWithError += topicPartition
}
}
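FetchResponse.divergingEpoch, used above when collecting diverging end offsets, is a new helper whose body is not part of this diff. A plausible sketch, assuming the generated EpochEndOffset defaults to an epoch of -1 when the leader reported no diverging epoch:

import java.util.Optional;

import org.apache.kafka.common.message.FetchResponseData;

final class DivergingEpochSketch {
    // Hypothetical sketch: expose the diverging epoch only when the broker actually set one.
    static Optional<FetchResponseData.EpochEndOffset> divergingEpoch(FetchResponseData.PartitionData partition) {
        FetchResponseData.EpochEndOffset divergingEpoch = partition.divergingEpoch();
        return divergingEpoch.epoch() < 0 ? Optional.empty() : Optional.of(divergingEpoch);
    }
}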


@ -34,7 +34,6 @@ import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicRe
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseBroker
import org.apache.kafka.common.message.{BeginQuorumEpochResponseData, BrokerHeartbeatResponseData, BrokerRegistrationResponseData, CreateTopicsResponseData, DescribeQuorumResponseData, EndQuorumEpochResponseData, FetchResponseData, MetadataResponseData, SaslAuthenticateResponseData, SaslHandshakeResponseData, UnregisterBrokerResponseData, VoteResponseData}
import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors}
import org.apache.kafka.common.record.BaseRecords
import org.apache.kafka.common.requests._
import org.apache.kafka.common.resource.Resource
import org.apache.kafka.common.resource.Resource.CLUSTER_NAME
@ -116,7 +115,7 @@ class ControllerApis(val requestChannel: RequestChannel,
private def handleFetch(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
handleRaftRequest(request, response => new FetchResponse[BaseRecords](response.asInstanceOf[FetchResponseData]))
handleRaftRequest(request, response => new FetchResponse(response.asInstanceOf[FetchResponseData]))
}
def handleMetadataRequest(request: RequestChannel.Request): Unit = {


@ -17,8 +17,8 @@
package kafka.server
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.record.Records
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
sealed trait FetchIsolation
case object FetchLogEnd extends FetchIsolation
@ -28,4 +28,4 @@ case object FetchTxnCommitted extends FetchIsolation
case class FetchDataInfo(fetchOffsetMetadata: LogOffsetMetadata,
records: Records,
firstEntryIncomplete: Boolean = false,
abortedTransactions: Option[List[AbortedTransaction]] = None)
abortedTransactions: Option[List[FetchResponseData.AbortedTransaction]] = None)


@ -17,27 +17,26 @@
package kafka.server
import java.util
import java.util.Optional
import java.util.concurrent.{ThreadLocalRandom, TimeUnit}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.Logging
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.Records
import org.apache.kafka.common.requests.FetchMetadata.{FINAL_EPOCH, INITIAL_EPOCH, INVALID_SESSION_ID}
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.utils.{ImplicitLinkedHashCollection, Time, Utils}
import scala.math.Ordered.orderingToOrdered
import java.util
import java.util.Optional
import java.util.concurrent.{ThreadLocalRandom, TimeUnit}
import scala.collection.{mutable, _}
import scala.math.Ordered.orderingToOrdered
object FetchSession {
type REQ_MAP = util.Map[TopicPartition, FetchRequest.PartitionData]
type RESP_MAP = util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
type RESP_MAP = util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
type CACHE_MAP = ImplicitLinkedHashCollection[CachedPartition]
type RESP_MAP_ITER = util.Iterator[util.Map.Entry[TopicPartition, FetchResponse.PartitionData[Records]]]
type RESP_MAP_ITER = util.Iterator[util.Map.Entry[TopicPartition, FetchResponseData.PartitionData]]
val NUM_INCREMENTAL_FETCH_SESSISONS = "NumIncrementalFetchSessions"
val NUM_INCREMENTAL_FETCH_PARTITIONS_CACHED = "NumIncrementalFetchPartitionsCached"
@ -100,7 +99,7 @@ class CachedPartition(val topic: String,
reqData.currentLeaderEpoch, reqData.logStartOffset, -1, reqData.lastFetchedEpoch)
def this(part: TopicPartition, reqData: FetchRequest.PartitionData,
respData: FetchResponse.PartitionData[Records]) =
respData: FetchResponseData.PartitionData) =
this(part.topic, part.partition, reqData.maxBytes, reqData.fetchOffset, respData.highWatermark,
reqData.currentLeaderEpoch, reqData.logStartOffset, respData.logStartOffset, reqData.lastFetchedEpoch)
@ -125,10 +124,10 @@ class CachedPartition(val topic: String,
* @param updateResponseData if set to true, update this CachedPartition with new request and response data.
* @return True if this partition should be included in the response; false if it can be omitted.
*/
def maybeUpdateResponseData(respData: FetchResponse.PartitionData[Records], updateResponseData: Boolean): Boolean = {
def maybeUpdateResponseData(respData: FetchResponseData.PartitionData, updateResponseData: Boolean): Boolean = {
// Check the response data.
var mustRespond = false
if ((respData.records != null) && (respData.records.sizeInBytes > 0)) {
if (FetchResponse.recordsSize(respData) > 0) {
// Partitions with new data are always included in the response.
mustRespond = true
}
@ -142,11 +141,11 @@ class CachedPartition(val topic: String,
if (updateResponseData)
localLogStartOffset = respData.logStartOffset
}
if (respData.preferredReadReplica.isPresent) {
if (FetchResponse.isPreferredReplica(respData)) {
// If the broker computed a preferred read replica, we need to include it in the response
mustRespond = true
}
if (respData.error.code != 0) {
if (respData.errorCode != Errors.NONE.code) {
// Partitions with errors are always included in the response.
// We also set the cached highWatermark to an invalid offset, -1.
// This ensures that when the error goes away, we re-send the partition.
@ -154,7 +153,8 @@ class CachedPartition(val topic: String,
highWatermark = -1
mustRespond = true
}
if (respData.divergingEpoch.isPresent) {
if (FetchResponse.isDivergingEpoch(respData)) {
// Partitions with diverging epoch are always included in response to trigger truncation.
mustRespond = true
}
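FetchResponse.isPreferredReplica and FetchResponse.isDivergingEpoch, used in maybeUpdateResponseData above, are likewise new helpers whose bodies are not shown in this diff. A rough sketch, assuming the schema defaults (INVALID_PREFERRED_REPLICA_ID for the read replica, an epoch of -1 for the diverging epoch) mark the unset state:

import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.requests.FetchResponse;

final class PartitionFlagsSketch {
    // Hypothetical sketch: true only when the broker filled in a preferred read replica.
    static boolean isPreferredReplica(FetchResponseData.PartitionData partition) {
        return partition.preferredReadReplica() != FetchResponse.INVALID_PREFERRED_REPLICA_ID;
    }

    // Hypothetical sketch: true only when the broker reported a diverging epoch to trigger truncation.
    static boolean isDivergingEpoch(FetchResponseData.PartitionData partition) {
        return partition.divergingEpoch().epoch() >= 0;
    }
}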
@ -163,7 +163,7 @@ class CachedPartition(val topic: String,
override def hashCode: Int = (31 * partition) + topic.hashCode
def canEqual(that: Any) = that.isInstanceOf[CachedPartition]
def canEqual(that: Any): Boolean = that.isInstanceOf[CachedPartition]
override def equals(that: Any): Boolean =
that match {
@ -292,7 +292,7 @@ trait FetchContext extends Logging {
* Updates the fetch context with new partition information. Generates response data.
* The response data may require subsequent down-conversion.
*/
def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse[Records]
def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse
def partitionsToLogString(partitions: util.Collection[TopicPartition]): String =
FetchSession.partitionsToLogString(partitions, isTraceEnabled)
@ -300,8 +300,8 @@ trait FetchContext extends Logging {
/**
* Return an empty throttled response due to quota violation.
*/
def getThrottledResponse(throttleTimeMs: Int): FetchResponse[Records] =
new FetchResponse(Errors.NONE, new FetchSession.RESP_MAP, throttleTimeMs, INVALID_SESSION_ID)
def getThrottledResponse(throttleTimeMs: Int): FetchResponse =
FetchResponse.of(Errors.NONE, throttleTimeMs, INVALID_SESSION_ID, new FetchSession.RESP_MAP)
}
/**
@ -318,9 +318,9 @@ class SessionErrorContext(val error: Errors,
}
// Because of the fetch session error, we don't know what partitions were supposed to be in this request.
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse[Records] = {
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = {
debug(s"Session error fetch context returning $error")
new FetchResponse(error, new FetchSession.RESP_MAP, 0, INVALID_SESSION_ID)
FetchResponse.of(error, 0, INVALID_SESSION_ID, new FetchSession.RESP_MAP)
}
}
@ -341,9 +341,9 @@ class SessionlessFetchContext(val fetchData: util.Map[TopicPartition, FetchReque
FetchResponse.sizeOf(versionId, updates.entrySet.iterator)
}
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse[Records] = {
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = {
debug(s"Sessionless fetch context returning ${partitionsToLogString(updates.keySet)}")
new FetchResponse(Errors.NONE, updates, 0, INVALID_SESSION_ID)
FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, updates)
}
}
@ -372,7 +372,7 @@ class FullFetchContext(private val time: Time,
FetchResponse.sizeOf(versionId, updates.entrySet.iterator)
}
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse[Records] = {
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = {
def createNewSession: FetchSession.CACHE_MAP = {
val cachedPartitions = new FetchSession.CACHE_MAP(updates.size)
updates.forEach { (part, respData) =>
@ -385,7 +385,7 @@ class FullFetchContext(private val time: Time,
updates.size, () => createNewSession)
debug(s"Full fetch context with session id $responseSessionId returning " +
s"${partitionsToLogString(updates.keySet)}")
new FetchResponse(Errors.NONE, updates, 0, responseSessionId)
FetchResponse.of(Errors.NONE, 0, responseSessionId, updates)
}
}
@ -417,7 +417,7 @@ class IncrementalFetchContext(private val time: Time,
private class PartitionIterator(val iter: FetchSession.RESP_MAP_ITER,
val updateFetchContextAndRemoveUnselected: Boolean)
extends FetchSession.RESP_MAP_ITER {
var nextElement: util.Map.Entry[TopicPartition, FetchResponse.PartitionData[Records]] = null
var nextElement: util.Map.Entry[TopicPartition, FetchResponseData.PartitionData] = null
override def hasNext: Boolean = {
while ((nextElement == null) && iter.hasNext) {
@ -441,7 +441,7 @@ class IncrementalFetchContext(private val time: Time,
nextElement != null
}
override def next(): util.Map.Entry[TopicPartition, FetchResponse.PartitionData[Records]] = {
override def next(): util.Map.Entry[TopicPartition, FetchResponseData.PartitionData] = {
if (!hasNext) throw new NoSuchElementException
val element = nextElement
nextElement = null
@ -463,7 +463,7 @@ class IncrementalFetchContext(private val time: Time,
}
}
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse[Records] = {
override def updateAndGenerateResponseData(updates: FetchSession.RESP_MAP): FetchResponse = {
session.synchronized {
// Check to make sure that the session epoch didn't change in between
// creating this fetch context and generating this response.
@ -471,7 +471,7 @@ class IncrementalFetchContext(private val time: Time,
if (session.epoch != expectedEpoch) {
info(s"Incremental fetch session ${session.id} expected epoch $expectedEpoch, but " +
s"got ${session.epoch}. Possible duplicate request.")
new FetchResponse(Errors.INVALID_FETCH_SESSION_EPOCH, new FetchSession.RESP_MAP, 0, session.id)
FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, session.id, new FetchSession.RESP_MAP)
} else {
// Iterate over the update list using PartitionIterator. This will prune updates which don't need to be sent
val partitionIter = new PartitionIterator(updates.entrySet.iterator, true)
@ -480,12 +480,12 @@ class IncrementalFetchContext(private val time: Time,
}
debug(s"Incremental fetch context with session id ${session.id} returning " +
s"${partitionsToLogString(updates.keySet)}")
new FetchResponse(Errors.NONE, updates, 0, session.id)
FetchResponse.of(Errors.NONE, 0, session.id, updates)
}
}
}
override def getThrottledResponse(throttleTimeMs: Int): FetchResponse[Records] = {
override def getThrottledResponse(throttleTimeMs: Int): FetchResponse = {
session.synchronized {
// Check to make sure that the session epoch didn't change in between
// creating this fetch context and generating this response.
@ -493,9 +493,9 @@ class IncrementalFetchContext(private val time: Time,
if (session.epoch != expectedEpoch) {
info(s"Incremental fetch session ${session.id} expected epoch $expectedEpoch, but " +
s"got ${session.epoch}. Possible duplicate request.")
new FetchResponse(Errors.INVALID_FETCH_SESSION_EPOCH, new FetchSession.RESP_MAP, throttleTimeMs, session.id)
FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, throttleTimeMs, session.id, new FetchSession.RESP_MAP)
} else {
new FetchResponse(Errors.NONE, new FetchSession.RESP_MAP, throttleTimeMs, session.id)
FetchResponse.of(Errors.NONE, throttleTimeMs, session.id, new FetchSession.RESP_MAP)
}
}
}


@ -17,24 +17,18 @@
package kafka.server
import java.lang.{Long => JLong}
import java.nio.ByteBuffer
import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Collections, Optional}
import kafka.admin.AdminUtils
import kafka.api.{ApiVersion, ElectLeadersRequestOps, KAFKA_0_11_0_IV0, KAFKA_2_3_IV0}
import kafka.common.OffsetAndMetadata
import kafka.controller.ReplicaAssignment
import kafka.coordinator.group.{GroupCoordinator, JoinGroupResult, LeaveGroupResult, SyncGroupResult}
import kafka.coordinator.group._
import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinator}
import kafka.log.AppendOrigin
import kafka.message.ZStdCompressionCodec
import kafka.network.RequestChannel
import kafka.security.authorizer.AuthorizerUtils
import kafka.server.QuotaFactory.{QuotaManagers, UnboundedQuota}
import kafka.server.metadata.ConfigRepository
import kafka.utils.Implicits._
import kafka.utils.{CoreUtils, Logging}
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
@ -61,7 +55,7 @@ import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsParti
import org.apache.kafka.common.message.MetadataResponseData.{MetadataResponsePartition, MetadataResponseTopic}
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult, OffsetForLeaderTopicResultCollection}
import org.apache.kafka.common.message.{AddOffsetsToTxnResponseData, AlterClientQuotasResponseData, AlterConfigsResponseData, AlterPartitionReassignmentsResponseData, AlterReplicaLogDirsResponseData, CreateAclsResponseData, CreatePartitionsResponseData, CreateTopicsResponseData, DeleteAclsResponseData, DeleteGroupsResponseData, DeleteRecordsResponseData, DeleteTopicsResponseData, DescribeAclsResponseData, DescribeClientQuotasResponseData, DescribeClusterResponseData, DescribeConfigsResponseData, DescribeGroupsResponseData, DescribeLogDirsResponseData, DescribeProducersResponseData, DescribeTransactionsResponseData, EndTxnResponseData, ExpireDelegationTokenResponseData, FindCoordinatorResponseData, HeartbeatResponseData, InitProducerIdResponseData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsResponseData, ListOffsetsResponseData, ListPartitionReassignmentsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteResponseData, OffsetForLeaderEpochResponseData, RenewDelegationTokenResponseData, SaslAuthenticateResponseData, SaslHandshakeResponseData, StopReplicaResponseData, SyncGroupResponseData, UpdateMetadataResponseData}
import org.apache.kafka.common.message._
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.{ListenerName, Send}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
@ -80,14 +74,18 @@ import org.apache.kafka.common.utils.{ProducerIdAndEpoch, Time}
import org.apache.kafka.common.{Node, TopicPartition, Uuid}
import org.apache.kafka.server.authorizer._
import java.lang.{Long => JLong}
import java.nio.ByteBuffer
import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Collections, Optional}
import scala.annotation.nowarn
import scala.collection.mutable.ArrayBuffer
import scala.collection.{Map, Seq, Set, immutable, mutable}
import scala.compat.java8.OptionConverters._
import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success, Try}
import kafka.coordinator.group.GroupOverview
import kafka.server.metadata.ConfigRepository
/**
* Logic to handle the various Kafka requests
@ -681,25 +679,20 @@ class KafkaApis(val requestChannel: RequestChannel,
None
}
def errorResponse[T >: MemoryRecords <: BaseRecords](error: Errors): FetchResponse.PartitionData[T] = {
new FetchResponse.PartitionData[T](error, FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET,
FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
}
val erroneous = mutable.ArrayBuffer[(TopicPartition, FetchResponse.PartitionData[Records])]()
val erroneous = mutable.ArrayBuffer[(TopicPartition, FetchResponseData.PartitionData)]()
val interesting = mutable.ArrayBuffer[(TopicPartition, FetchRequest.PartitionData)]()
if (fetchRequest.isFromFollower) {
// The follower must have ClusterAction on ClusterResource in order to fetch partition data.
if (authHelper.authorize(request.context, CLUSTER_ACTION, CLUSTER, CLUSTER_NAME)) {
fetchContext.foreachPartition { (topicPartition, data) =>
if (!metadataCache.contains(topicPartition))
erroneous += topicPartition -> errorResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION)
erroneous += topicPartition -> FetchResponse.partitionResponse(topicPartition.partition, Errors.UNKNOWN_TOPIC_OR_PARTITION)
else
interesting += (topicPartition -> data)
}
} else {
fetchContext.foreachPartition { (part, _) =>
erroneous += part -> errorResponse(Errors.TOPIC_AUTHORIZATION_FAILED)
erroneous += part -> FetchResponse.partitionResponse(part.partition, Errors.TOPIC_AUTHORIZATION_FAILED)
}
}
} else {
@ -711,9 +704,9 @@ class KafkaApis(val requestChannel: RequestChannel,
val authorizedTopics = authHelper.filterByAuthorized(request.context, READ, TOPIC, partitionDatas)(_._1.topic)
partitionDatas.foreach { case (topicPartition, data) =>
if (!authorizedTopics.contains(topicPartition.topic))
erroneous += topicPartition -> errorResponse(Errors.TOPIC_AUTHORIZATION_FAILED)
erroneous += topicPartition -> FetchResponse.partitionResponse(topicPartition.partition, Errors.TOPIC_AUTHORIZATION_FAILED)
else if (!metadataCache.contains(topicPartition))
erroneous += topicPartition -> errorResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION)
erroneous += topicPartition -> FetchResponse.partitionResponse(topicPartition.partition, Errors.UNKNOWN_TOPIC_OR_PARTITION)
else
interesting += (topicPartition -> data)
}
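FetchResponse.partitionResponse, which replaces the removed local errorResponse helper above, is not shown in this diff either. A minimal sketch, assuming it only sets the partition index and error code, resets the high watermark to the invalid sentinel (the INVALID_HIGH_WATERMARK name is an assumption here), and leaves the remaining fields at their defaults:

import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchResponse;

final class PartitionResponseSketch {
    // Hypothetical sketch: an error-only partition response; records stay null and the
    // remaining offsets keep their invalid/sentinel defaults.
    static FetchResponseData.PartitionData partitionResponse(int partition, Errors error) {
        return new FetchResponseData.PartitionData()
            .setPartitionIndex(partition)
            .setErrorCode(error.code())
            .setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK);
    }
}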
@ -732,12 +725,12 @@ class KafkaApis(val requestChannel: RequestChannel,
}
def maybeConvertFetchedData(tp: TopicPartition,
partitionData: FetchResponse.PartitionData[Records]): FetchResponse.PartitionData[BaseRecords] = {
partitionData: FetchResponseData.PartitionData): FetchResponseData.PartitionData = {
val logConfig = replicaManager.getLogConfig(tp)
if (logConfig.exists(_.compressionType == ZStdCompressionCodec.name) && versionId < 10) {
trace(s"Fetching messages is disabled for ZStandard compressed partition $tp. Sending unsupported version response to $clientId.")
errorResponse(Errors.UNSUPPORTED_COMPRESSION_TYPE)
FetchResponse.partitionResponse(tp.partition, Errors.UNSUPPORTED_COMPRESSION_TYPE)
} else {
// Down-conversion of the fetched records is needed when the stored magic version is
// greater than that supported by the client (as indicated by the fetch request version). If the
@ -746,7 +739,7 @@ class KafkaApis(val requestChannel: RequestChannel,
// know it must be supported. However, if the magic version is changed from a higher version back to a
// lower version, this check will no longer be valid and we will fail to down-convert the messages
// which were written in the new format prior to the version downgrade.
val unconvertedRecords = partitionData.records
val unconvertedRecords = FetchResponse.recordsOrFail(partitionData)
val downConvertMagic =
logConfig.map(_.messageFormatVersion.recordVersion.value).flatMap { magic =>
if (magic > RecordBatch.MAGIC_VALUE_V0 && versionId <= 1 && !unconvertedRecords.hasCompatibleMagic(RecordBatch.MAGIC_VALUE_V0))
@ -762,7 +755,7 @@ class KafkaApis(val requestChannel: RequestChannel,
// For fetch requests from clients, check if down-conversion is disabled for the particular partition
if (!fetchRequest.isFromFollower && !logConfig.forall(_.messageDownConversionEnable)) {
trace(s"Conversion to message format ${downConvertMagic.get} is disabled for partition $tp. Sending unsupported version response to $clientId.")
errorResponse(Errors.UNSUPPORTED_VERSION)
FetchResponse.partitionResponse(tp.partition, Errors.UNSUPPORTED_VERSION)
} else {
try {
trace(s"Down converting records from partition $tp to message format version $magic for fetch request from $clientId")
@ -770,71 +763,77 @@ class KafkaApis(val requestChannel: RequestChannel,
// as possible. With KIP-283, we have the ability to lazily down-convert in a chunked manner. The lazy, chunked
// down-conversion always guarantees that at least one batch of messages is down-converted and sent out to the
// client.
val error = maybeDownConvertStorageError(partitionData.error)
new FetchResponse.PartitionData[BaseRecords](error, partitionData.highWatermark,
partitionData.lastStableOffset, partitionData.logStartOffset,
partitionData.preferredReadReplica, partitionData.abortedTransactions,
new LazyDownConversionRecords(tp, unconvertedRecords, magic, fetchContext.getFetchOffset(tp).get, time))
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(maybeDownConvertStorageError(Errors.forCode(partitionData.errorCode)).code)
.setHighWatermark(partitionData.highWatermark)
.setLastStableOffset(partitionData.lastStableOffset)
.setLogStartOffset(partitionData.logStartOffset)
.setAbortedTransactions(partitionData.abortedTransactions)
.setRecords(new LazyDownConversionRecords(tp, unconvertedRecords, magic, fetchContext.getFetchOffset(tp).get, time))
.setPreferredReadReplica(partitionData.preferredReadReplica())
} catch {
case e: UnsupportedCompressionTypeException =>
trace("Received unsupported compression type error during down-conversion", e)
errorResponse(Errors.UNSUPPORTED_COMPRESSION_TYPE)
FetchResponse.partitionResponse(tp.partition, Errors.UNSUPPORTED_COMPRESSION_TYPE)
}
}
case None =>
val error = maybeDownConvertStorageError(partitionData.error)
new FetchResponse.PartitionData[BaseRecords](error,
partitionData.highWatermark,
partitionData.lastStableOffset,
partitionData.logStartOffset,
partitionData.preferredReadReplica,
partitionData.abortedTransactions,
partitionData.divergingEpoch,
unconvertedRecords)
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(maybeDownConvertStorageError(Errors.forCode(partitionData.errorCode)).code)
.setHighWatermark(partitionData.highWatermark)
.setLastStableOffset(partitionData.lastStableOffset)
.setLogStartOffset(partitionData.logStartOffset)
.setAbortedTransactions(partitionData.abortedTransactions)
.setRecords(unconvertedRecords)
.setPreferredReadReplica(partitionData.preferredReadReplica)
.setDivergingEpoch(partitionData.divergingEpoch)
}
}
}
// the callback for process a fetch response, invoked before throttling
def processResponseCallback(responsePartitionData: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
val partitions = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
val partitions = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
val reassigningPartitions = mutable.Set[TopicPartition]()
responsePartitionData.foreach { case (tp, data) =>
val abortedTransactions = data.abortedTransactions.map(_.asJava).orNull
val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
if (data.isReassignmentFetch)
reassigningPartitions.add(tp)
val error = maybeDownConvertStorageError(data.error)
partitions.put(tp, new FetchResponse.PartitionData(
error,
data.highWatermark,
lastStableOffset,
data.logStartOffset,
data.preferredReadReplica.map(int2Integer).asJava,
abortedTransactions,
data.divergingEpoch.asJava,
data.records))
if (data.isReassignmentFetch) reassigningPartitions.add(tp)
val partitionData = new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(maybeDownConvertStorageError(data.error).code)
.setHighWatermark(data.highWatermark)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(data.logStartOffset)
.setAbortedTransactions(abortedTransactions)
.setRecords(data.records)
.setPreferredReadReplica(data.preferredReadReplica.getOrElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID))
data.divergingEpoch.foreach(partitionData.setDivergingEpoch)
partitions.put(tp, partitionData)
}
erroneous.foreach { case (tp, data) => partitions.put(tp, data) }
var unconvertedFetchResponse: FetchResponse[Records] = null
var unconvertedFetchResponse: FetchResponse = null
def createResponse(throttleTimeMs: Int): FetchResponse[BaseRecords] = {
def createResponse(throttleTimeMs: Int): FetchResponse = {
// Down-convert messages for each partition if required
val convertedData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[BaseRecords]]
val convertedData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
unconvertedFetchResponse.responseData.forEach { (tp, unconvertedPartitionData) =>
if (unconvertedPartitionData.error != Errors.NONE)
val error = Errors.forCode(unconvertedPartitionData.errorCode)
if (error != Errors.NONE)
debug(s"Fetch request with correlation id ${request.header.correlationId} from client $clientId " +
s"on partition $tp failed due to ${unconvertedPartitionData.error.exceptionName}")
s"on partition $tp failed due to ${error.exceptionName}")
convertedData.put(tp, maybeConvertFetchedData(tp, unconvertedPartitionData))
}
// Prepare fetch response from converted data
val response = new FetchResponse(unconvertedFetchResponse.error, convertedData, throttleTimeMs,
unconvertedFetchResponse.sessionId)
val response = FetchResponse.of(unconvertedFetchResponse.error, throttleTimeMs, unconvertedFetchResponse.sessionId, convertedData)
// record the bytes out metrics only when the response is being sent
response.responseData.forEach { (tp, data) =>
brokerTopicStats.updateBytesOut(tp.topic, fetchRequest.isFromFollower, reassigningPartitions.contains(tp), data.records.sizeInBytes)
brokerTopicStats.updateBytesOut(tp.topic, fetchRequest.isFromFollower,
reassigningPartitions.contains(tp), FetchResponse.recordsSize(data))
}
response
}
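The recordsOrFail and recordsSize helpers used in this file, and again in the fetcher threads further below, centralize the cast from the generated BaseRecords field back to Records. Their bodies are not part of this diff, so the following is only a plausible sketch:

import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Records;

final class RecordsHelpersSketch {
    // Hypothetical sketch: return the partition's records as Records, failing loudly if the
    // response carries a record type that is not a Records subtype (e.g. lazy down-converted records).
    static Records recordsOrFail(FetchResponseData.PartitionData partition) {
        BaseRecords records = partition.records();
        if (records == null)
            return MemoryRecords.EMPTY;
        if (records instanceof Records)
            return (Records) records;
        throw new ClassCastException("Expected Records but found " + records.getClass().getName());
    }

    // Hypothetical sketch: size in bytes of the partition's record set, or 0 when it is absent.
    static int recordsSize(FetchResponseData.PartitionData partition) {
        return partition.records() == null ? 0 : partition.records().sizeInBytes();
    }
}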
@ -3367,7 +3366,7 @@ object KafkaApis {
// Traffic from both in-sync and out of sync replicas are accounted for in replication quota to ensure total replication
// traffic doesn't exceed quota.
private[server] def sizeOfThrottledPartitions(versionId: Short,
unconvertedResponse: FetchResponse[Records],
unconvertedResponse: FetchResponse,
quota: ReplicationQuotaManager): Int = {
FetchResponse.sizeOf(versionId, unconvertedResponse.responseData.entrySet
.iterator.asScala.filter(element => quota.isThrottled(element.getKey)).asJava)


@ -17,28 +17,24 @@
package kafka.server
import java.util
import java.util.Optional
import kafka.api.Request
import kafka.cluster.BrokerEndPoint
import kafka.log.{LeaderOffsetIncremented, LogAppendInfo}
import kafka.server.AbstractFetcherThread.ReplicaFetch
import kafka.server.AbstractFetcherThread.ResultWithPartitions
import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions}
import kafka.server.QuotaFactory.UnboundedQuota
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.KafkaStorageException
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.Records
import org.apache.kafka.common.requests.FetchResponse.PartitionData
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse}
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH
import org.apache.kafka.common.requests.RequestUtils
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, RequestUtils}
import scala.jdk.CollectionConverters._
import java.util
import java.util.Optional
import scala.collection.{Map, Seq, Set, mutable}
import scala.compat.java8.OptionConverters._
import scala.jdk.CollectionConverters._
class ReplicaAlterLogDirsThread(name: String,
sourceBroker: BrokerEndPoint,
@ -77,15 +73,21 @@ class ReplicaAlterLogDirsThread(name: String,
}
def fetchFromLeader(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = {
var partitionData: Seq[(TopicPartition, FetchResponse.PartitionData[Records])] = null
var partitionData: Seq[(TopicPartition, FetchData)] = null
val request = fetchRequest.build()
def processResponseCallback(responsePartitionData: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
partitionData = responsePartitionData.map { case (tp, data) =>
val abortedTransactions = data.abortedTransactions.map(_.asJava).orNull
val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
tp -> new FetchResponse.PartitionData(data.error, data.highWatermark, lastStableOffset,
data.logStartOffset, abortedTransactions, data.records)
tp -> new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(data.error.code)
.setHighWatermark(data.highWatermark)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(data.logStartOffset)
.setAbortedTransactions(abortedTransactions)
.setRecords(data.records)
}
}
@ -110,10 +112,10 @@ class ReplicaAlterLogDirsThread(name: String,
// process fetched data
override def processPartitionData(topicPartition: TopicPartition,
fetchOffset: Long,
partitionData: PartitionData[Records]): Option[LogAppendInfo] = {
partitionData: FetchData): Option[LogAppendInfo] = {
val partition = replicaMgr.getPartitionOrException(topicPartition)
val futureLog = partition.futureLocalLogOrException
val records = toMemoryRecords(partitionData.records)
val records = toMemoryRecords(FetchResponse.recordsOrFail(partitionData))
if (fetchOffset != futureLog.logEndOffset)
throw new IllegalStateException("Offset mismatch for the future replica %s: fetched offset = %d, log end offset = %d.".format(


@ -35,7 +35,7 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetFor
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.{MemoryRecords, Records}
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests._
import org.apache.kafka.common.utils.{LogContext, Time}
@ -162,7 +162,7 @@ class ReplicaFetcherThread(name: String,
val logTrace = isTraceEnabled
val partition = replicaMgr.getPartitionOrException(topicPartition)
val log = partition.localLogOrException
val records = toMemoryRecords(partitionData.records)
val records = toMemoryRecords(FetchResponse.recordsOrFail(partitionData))
maybeWarnIfOversizedRecords(records, topicPartition)
@ -215,7 +215,7 @@ class ReplicaFetcherThread(name: String,
override protected def fetchFromLeader(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = {
try {
val clientResponse = leaderEndpoint.sendRequest(fetchRequest)
val fetchResponse = clientResponse.responseBody.asInstanceOf[FetchResponse[Records]]
val fetchResponse = clientResponse.responseBody.asInstanceOf[FetchResponse]
if (!fetchSessionHandler.handleResponse(fetchResponse)) {
Map.empty
} else {


@ -57,7 +57,6 @@ import org.apache.kafka.common.replica.PartitionView.DefaultPartitionView
import org.apache.kafka.common.replica.ReplicaView.DefaultReplicaView
import org.apache.kafka.common.replica.{ClientMetadata, _}
import org.apache.kafka.common.requests.FetchRequest.PartitionData
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests._
import org.apache.kafka.common.utils.Time
@ -150,7 +149,7 @@ case class FetchPartitionData(error: Errors = Errors.NONE,
records: Records,
divergingEpoch: Option[FetchResponseData.EpochEndOffset],
lastStableOffset: Option[Long],
abortedTransactions: Option[List[AbortedTransaction]],
abortedTransactions: Option[List[FetchResponseData.AbortedTransaction]],
preferredReadReplica: Option[Int],
isReassignmentFetch: Boolean)

View File

@ -17,6 +17,22 @@
package kafka.tools
import joptsimple.OptionParser
import kafka.api._
import kafka.utils.{IncludeList, _}
import org.apache.kafka.clients._
import org.apache.kafka.clients.admin.{Admin, ListTopicsOptions, TopicDescription}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.{NetworkReceive, Selectable, Selector}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.AbstractRequest.Builder
import org.apache.kafka.common.requests.{AbstractRequest, FetchResponse, ListOffsetsRequest, FetchRequest => JFetchRequest}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.utils.{LogContext, Time}
import org.apache.kafka.common.{Node, TopicPartition}
import java.net.SocketTimeoutException
import java.text.SimpleDateFormat
import java.util
@ -24,26 +40,8 @@ import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.regex.{Pattern, PatternSyntaxException}
import java.util.{Date, Optional, Properties}
import joptsimple.OptionParser
import kafka.api._
import kafka.utils.IncludeList
import kafka.utils._
import org.apache.kafka.clients._
import org.apache.kafka.clients.admin.{Admin, ListTopicsOptions, TopicDescription}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.{NetworkReceive, Selectable, Selector}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.AbstractRequest.Builder
import org.apache.kafka.common.requests.{AbstractRequest, FetchResponse, ListOffsetsRequest, FetchRequest => JFetchRequest}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.utils.{LogContext, Time}
import org.apache.kafka.common.{Node, TopicPartition}
import scala.jdk.CollectionConverters._
import scala.collection.Seq
import scala.jdk.CollectionConverters._
/**
* For verifying the consistency among replicas.
@ -261,7 +259,7 @@ private class ReplicaBuffer(expectedReplicasPerTopicPartition: collection.Map[To
expectedNumFetchers: Int,
reportInterval: Long) extends Logging {
private val fetchOffsetMap = new Pool[TopicPartition, Long]
private val recordsCache = new Pool[TopicPartition, Pool[Int, FetchResponse.PartitionData[MemoryRecords]]]
private val recordsCache = new Pool[TopicPartition, Pool[Int, FetchResponseData.PartitionData]]
private val fetcherBarrier = new AtomicReference(new CountDownLatch(expectedNumFetchers))
private val verificationBarrier = new AtomicReference(new CountDownLatch(1))
@volatile private var lastReportTime = Time.SYSTEM.milliseconds
@ -284,7 +282,7 @@ private class ReplicaBuffer(expectedReplicasPerTopicPartition: collection.Map[To
private def initialize(): Unit = {
for (topicPartition <- expectedReplicasPerTopicPartition.keySet)
recordsCache.put(topicPartition, new Pool[Int, FetchResponse.PartitionData[MemoryRecords]])
recordsCache.put(topicPartition, new Pool[Int, FetchResponseData.PartitionData])
setInitialOffsets()
}
@ -294,7 +292,7 @@ private class ReplicaBuffer(expectedReplicasPerTopicPartition: collection.Map[To
fetchOffsetMap.put(tp, offset)
}
def addFetchedData(topicAndPartition: TopicPartition, replicaId: Int, partitionData: FetchResponse.PartitionData[MemoryRecords]): Unit = {
def addFetchedData(topicAndPartition: TopicPartition, replicaId: Int, partitionData: FetchResponseData.PartitionData): Unit = {
recordsCache.get(topicAndPartition).put(replicaId, partitionData)
}
@ -311,7 +309,7 @@ private class ReplicaBuffer(expectedReplicasPerTopicPartition: collection.Map[To
"fetched " + fetchResponsePerReplica.size + " replicas for " + topicPartition + ", but expected "
+ expectedReplicasPerTopicPartition(topicPartition) + " replicas")
val recordBatchIteratorMap = fetchResponsePerReplica.map { case (replicaId, fetchResponse) =>
replicaId -> fetchResponse.records.batches.iterator
replicaId -> FetchResponse.recordsOrFail(fetchResponse).batches.iterator
}
val maxHw = fetchResponsePerReplica.values.map(_.highWatermark).max
@ -403,10 +401,10 @@ private class ReplicaFetcher(name: String, sourceBroker: Node, topicPartitions:
debug("Issuing fetch request ")
var fetchResponse: FetchResponse[MemoryRecords] = null
var fetchResponse: FetchResponse = null
try {
val clientResponse = fetchEndpoint.sendRequest(fetchRequestBuilder)
fetchResponse = clientResponse.responseBody.asInstanceOf[FetchResponse[MemoryRecords]]
fetchResponse = clientResponse.responseBody.asInstanceOf[FetchResponse]
} catch {
case t: Throwable =>
if (!isRunning)
@ -414,14 +412,13 @@ private class ReplicaFetcher(name: String, sourceBroker: Node, topicPartitions:
}
if (fetchResponse != null) {
fetchResponse.responseData.forEach { (tp, partitionData) =>
replicaBuffer.addFetchedData(tp, sourceBroker.id, partitionData)
}
fetchResponse.data.responses.forEach(topicResponse =>
topicResponse.partitions.forEach(partitionResponse =>
replicaBuffer.addFetchedData(new TopicPartition(topicResponse.topic, partitionResponse.partitionIndex),
sourceBroker.id, partitionResponse)))
} else {
val emptyResponse = new FetchResponse.PartitionData(Errors.NONE, FetchResponse.INVALID_HIGHWATERMARK,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
for (topicAndPartition <- topicPartitions)
replicaBuffer.addFetchedData(topicAndPartition, sourceBroker.id, emptyResponse)
replicaBuffer.addFetchedData(topicAndPartition, sourceBroker.id, FetchResponse.partitionResponse(topicAndPartition.partition, Errors.NONE))
}
fetcherBarrier.countDown()
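A minimal sketch of the partitionResponse factory used above, with an illustrative partition index; it replaces the hand-built empty PartitionData that relied on the INVALID_* sentinel constants:

import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.FetchResponse

// Illustrative partition index 0: an "empty" partition response carrying only an error code.
val emptyPartition = FetchResponse.partitionResponse(0, Errors.NONE)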

View File

@ -24,7 +24,6 @@ import kafka.utils.Logging
import org.apache.kafka.common.internals.FatalExitError
import org.apache.kafka.common.message.{BeginQuorumEpochResponseData, EndQuorumEpochResponseData, FetchResponseData, FetchSnapshotResponseData, VoteResponseData}
import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage}
import org.apache.kafka.common.record.BaseRecords
import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, BeginQuorumEpochResponse, EndQuorumEpochResponse, FetchResponse, FetchSnapshotResponse, VoteResponse}
import org.apache.kafka.common.utils.Time
@ -81,7 +80,7 @@ class TestRaftRequestHandler(
}
private def handleFetch(request: RequestChannel.Request): Unit = {
handle(request, response => new FetchResponse[BaseRecords](response.asInstanceOf[FetchResponseData]))
handle(request, response => new FetchResponse(response.asInstanceOf[FetchResponseData]))
}
private def handleFetchSnapshot(request: RequestChannel.Request): Unit = {

View File

@ -49,7 +49,7 @@ import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadata
import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AlterPartitionReassignmentsRequestData, AlterReplicaLogDirsRequestData, ControlledShutdownRequestData, CreateAclsRequestData, CreatePartitionsRequestData, CreateTopicsRequestData, DeleteAclsRequestData, DeleteGroupsRequestData, DeleteRecordsRequestData, DeleteTopicsRequestData, DescribeClusterRequestData, DescribeConfigsRequestData, DescribeGroupsRequestData, DescribeLogDirsRequestData, DescribeProducersRequestData, DescribeTransactionsRequestData, FindCoordinatorRequestData, HeartbeatRequestData, IncrementalAlterConfigsRequestData, JoinGroupRequestData, ListPartitionReassignmentsRequestData, ListTransactionsRequestData, MetadataRequestData, OffsetCommitRequestData, ProduceRequestData, SyncGroupRequestData}
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, Records, SimpleRecord}
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, SimpleRecord}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.resource.PatternType.{LITERAL, PREFIXED}
import org.apache.kafka.common.resource.ResourceType._
@ -159,7 +159,7 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
val requestKeyToError = (topicNames: Map[Uuid, String]) => Map[ApiKeys, Nothing => Errors](
ApiKeys.METADATA -> ((resp: requests.MetadataResponse) => resp.errors.asScala.find(_._1 == topic).getOrElse(("test", Errors.NONE))._2),
ApiKeys.PRODUCE -> ((resp: requests.ProduceResponse) => resp.responses.asScala.find(_._1 == tp).get._2.error),
ApiKeys.FETCH -> ((resp: requests.FetchResponse[Records]) => resp.responseData.asScala.find(_._1 == tp).get._2.error),
ApiKeys.FETCH -> ((resp: requests.FetchResponse) => Errors.forCode(resp.responseData.asScala.find(_._1 == tp).get._2.errorCode)),
ApiKeys.LIST_OFFSETS -> ((resp: ListOffsetsResponse) => {
Errors.forCode(
resp.data
@ -169,12 +169,12 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
)
}),
ApiKeys.OFFSET_COMMIT -> ((resp: requests.OffsetCommitResponse) => Errors.forCode(
resp.data().topics().get(0).partitions().get(0).errorCode())),
resp.data.topics().get(0).partitions().get(0).errorCode)),
ApiKeys.OFFSET_FETCH -> ((resp: requests.OffsetFetchResponse) => resp.error),
ApiKeys.FIND_COORDINATOR -> ((resp: FindCoordinatorResponse) => resp.error),
ApiKeys.UPDATE_METADATA -> ((resp: requests.UpdateMetadataResponse) => resp.error),
ApiKeys.JOIN_GROUP -> ((resp: JoinGroupResponse) => resp.error),
ApiKeys.SYNC_GROUP -> ((resp: SyncGroupResponse) => Errors.forCode(resp.data.errorCode())),
ApiKeys.SYNC_GROUP -> ((resp: SyncGroupResponse) => Errors.forCode(resp.data.errorCode)),
ApiKeys.DESCRIBE_GROUPS -> ((resp: DescribeGroupsResponse) => {
Errors.forCode(resp.data.groups.asScala.find(g => group == g.groupId).head.errorCode)
}),
@ -187,8 +187,8 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
ApiKeys.STOP_REPLICA -> ((resp: requests.StopReplicaResponse) => Errors.forCode(
resp.partitionErrors.asScala.find(pe => pe.topicName == tp.topic && pe.partitionIndex == tp.partition).get.errorCode)),
ApiKeys.CONTROLLED_SHUTDOWN -> ((resp: requests.ControlledShutdownResponse) => resp.error),
ApiKeys.CREATE_TOPICS -> ((resp: CreateTopicsResponse) => Errors.forCode(resp.data.topics.find(topic).errorCode())),
ApiKeys.DELETE_TOPICS -> ((resp: requests.DeleteTopicsResponse) => Errors.forCode(resp.data.responses.find(topic).errorCode())),
ApiKeys.CREATE_TOPICS -> ((resp: CreateTopicsResponse) => Errors.forCode(resp.data.topics.find(topic).errorCode)),
ApiKeys.DELETE_TOPICS -> ((resp: requests.DeleteTopicsResponse) => Errors.forCode(resp.data.responses.find(topic).errorCode)),
ApiKeys.DELETE_RECORDS -> ((resp: requests.DeleteRecordsResponse) => Errors.forCode(
resp.data.topics.find(tp.topic).partitions.find(tp.partition).errorCode)),
ApiKeys.OFFSET_FOR_LEADER_EPOCH -> ((resp: OffsetsForLeaderEpochResponse) => Errors.forCode(
@ -211,17 +211,17 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
.find(p => p.partitionIndex == tp.partition).get.errorCode)),
ApiKeys.DESCRIBE_LOG_DIRS -> ((resp: DescribeLogDirsResponse) =>
if (resp.data.results.size() > 0) Errors.forCode(resp.data.results.get(0).errorCode) else Errors.CLUSTER_AUTHORIZATION_FAILED),
ApiKeys.CREATE_PARTITIONS -> ((resp: CreatePartitionsResponse) => Errors.forCode(resp.data.results.asScala.head.errorCode())),
ApiKeys.ELECT_LEADERS -> ((resp: ElectLeadersResponse) => Errors.forCode(resp.data().errorCode())),
ApiKeys.CREATE_PARTITIONS -> ((resp: CreatePartitionsResponse) => Errors.forCode(resp.data.results.asScala.head.errorCode)),
ApiKeys.ELECT_LEADERS -> ((resp: ElectLeadersResponse) => Errors.forCode(resp.data.errorCode)),
ApiKeys.INCREMENTAL_ALTER_CONFIGS -> ((resp: IncrementalAlterConfigsResponse) => {
val topicResourceError = IncrementalAlterConfigsResponse.fromResponseData(resp.data()).get(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic))
val topicResourceError = IncrementalAlterConfigsResponse.fromResponseData(resp.data).get(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic))
if (topicResourceError == null)
IncrementalAlterConfigsResponse.fromResponseData(resp.data()).get(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, brokerId.toString)).error
IncrementalAlterConfigsResponse.fromResponseData(resp.data).get(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, brokerId.toString)).error
else
topicResourceError.error()
}),
ApiKeys.ALTER_PARTITION_REASSIGNMENTS -> ((resp: AlterPartitionReassignmentsResponse) => Errors.forCode(resp.data().errorCode())),
ApiKeys.LIST_PARTITION_REASSIGNMENTS -> ((resp: ListPartitionReassignmentsResponse) => Errors.forCode(resp.data().errorCode())),
ApiKeys.ALTER_PARTITION_REASSIGNMENTS -> ((resp: AlterPartitionReassignmentsResponse) => Errors.forCode(resp.data.errorCode)),
ApiKeys.LIST_PARTITION_REASSIGNMENTS -> ((resp: ListPartitionReassignmentsResponse) => Errors.forCode(resp.data.errorCode)),
ApiKeys.OFFSET_DELETE -> ((resp: OffsetDeleteResponse) => {
Errors.forCode(
resp.data
@ -326,9 +326,9 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
requests.ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection(
Collections.singletonList(new ProduceRequestData.TopicProduceData()
.setName(tp.topic()).setPartitionData(Collections.singletonList(
.setName(tp.topic).setPartitionData(Collections.singletonList(
new ProduceRequestData.PartitionProduceData()
.setIndex(tp.partition())
.setIndex(tp.partition)
.setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("test".getBytes))))))
.iterator))
.setAcks(1.toShort)
@ -363,9 +363,9 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
private def offsetsForLeaderEpochRequest: OffsetsForLeaderEpochRequest = {
val epochs = new OffsetForLeaderTopicCollection()
epochs.add(new OffsetForLeaderTopic()
.setTopic(tp.topic())
.setTopic(tp.topic)
.setPartitions(List(new OffsetForLeaderPartition()
.setPartition(tp.partition())
.setPartition(tp.partition)
.setLeaderEpoch(7)
.setCurrentLeaderEpoch(27)).asJava))
OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs).build()
@ -509,9 +509,9 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
private def stopReplicaRequest: StopReplicaRequest = {
val topicStates = Seq(
new StopReplicaTopicState()
.setTopicName(tp.topic())
.setTopicName(tp.topic)
.setPartitionStates(Seq(new StopReplicaPartitionState()
.setPartitionIndex(tp.partition())
.setPartitionIndex(tp.partition)
.setLeaderEpoch(LeaderAndIsr.initialLeaderEpoch + 2)
.setDeletePartition(true)).asJava)
).asJava
@ -658,7 +658,7 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
List(new AlterPartitionReassignmentsRequestData.ReassignableTopic()
.setName(topic)
.setPartitions(
List(new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(tp.partition())).asJava
List(new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(tp.partition)).asJava
)).asJava
)
).build()
@ -1625,7 +1625,7 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
@Test
def testUnauthorizedCreatePartitions(): Unit = {
val createPartitionsResponse = connectAndReceive[CreatePartitionsResponse](createPartitionsRequest)
assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), createPartitionsResponse.data.results.asScala.head.errorCode())
assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, createPartitionsResponse.data.results.asScala.head.errorCode)
}
@Test
@ -1633,7 +1633,7 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
createTopic(topic)
addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WildcardHost, ALTER, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL))
val createPartitionsResponse = connectAndReceive[CreatePartitionsResponse](createPartitionsRequest)
assertEquals(Errors.NONE.code(), createPartitionsResponse.data.results.asScala.head.errorCode())
assertEquals(Errors.NONE.code, createPartitionsResponse.data.results.asScala.head.errorCode)
}
@Test
@ -2133,7 +2133,7 @@ class AuthorizerIntegrationTest extends BaseRequestTest {
numRecords: Int,
tp: TopicPartition): Unit = {
val futures = (0 until numRecords).map { i =>
producer.send(new ProducerRecord(tp.topic(), tp.partition(), i.toString.getBytes, i.toString.getBytes))
producer.send(new ProducerRecord(tp.topic, tp.partition, i.toString.getBytes, i.toString.getBytes))
}
try {
futures.foreach(_.get)

View File

@ -18,9 +18,8 @@
package kafka.tools
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, SimpleRecord}
import org.apache.kafka.common.requests.FetchResponse
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.Assertions.assertTrue
@ -44,7 +43,12 @@ class ReplicaVerificationToolTest {
}
val initialOffset = 4
val memoryRecords = MemoryRecords.withRecords(initialOffset, CompressionType.NONE, records: _*)
val partitionData = new FetchResponse.PartitionData(Errors.NONE, 20, 20, 0L, null, memoryRecords)
val partitionData = new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setHighWatermark(20)
.setLastStableOffset(20)
.setLogStartOffset(0)
.setRecords(memoryRecords)
replicaBuffer.addFetchedData(tp, replicaId, partitionData)
}

View File

@ -34,11 +34,11 @@ import kafka.server.{BrokerTopicStats, FetchDataInfo, FetchHighWatermark, FetchI
import kafka.utils._
import org.apache.kafka.common.{InvalidRecordException, KafkaException, TopicPartition, Uuid}
import org.apache.kafka.common.errors._
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record.MemoryRecords.RecordFilter
import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.requests.{ListOffsetsRequest, ListOffsetsResponse}
import org.apache.kafka.common.utils.{BufferSupplier, Time, Utils}
import org.easymock.EasyMock
@ -4725,7 +4725,8 @@ class LogTest {
assertEquals(1, fetchDataInfo.abortedTransactions.size)
assertTrue(fetchDataInfo.abortedTransactions.isDefined)
assertEquals(new AbortedTransaction(pid, 0), fetchDataInfo.abortedTransactions.get.head)
assertEquals(new FetchResponseData.AbortedTransaction().setProducerId(pid).setFirstOffset(0),
fetchDataInfo.abortedTransactions.get.head)
}
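A minimal sketch of the replacement type asserted above, with illustrative values; the generated FetchResponseData.AbortedTransaction uses fluent setters instead of the removed two-argument constructor:

import org.apache.kafka.common.message.FetchResponseData

// Illustrative producer id and first offset.
val aborted = new FetchResponseData.AbortedTransaction()
  .setProducerId(1L)
  .setFirstOffset(0L)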
@Test

View File

@ -17,7 +17,7 @@
package kafka.log
import kafka.utils.TestUtils
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.message.FetchResponseData
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
@ -136,7 +136,7 @@ class TransactionIndexTest {
assertEquals(abortedTransactions.take(3), index.collectAbortedTxns(0L, 100L).abortedTransactions)
index.reset()
assertEquals(List.empty[AbortedTransaction], index.collectAbortedTxns(0L, 100L).abortedTransactions)
assertEquals(List.empty[FetchResponseData.AbortedTransaction], index.collectAbortedTxns(0L, 100L).abortedTransactions)
}
@Test

View File

@ -20,7 +20,6 @@ package kafka.server
import java.nio.ByteBuffer
import java.util.Optional
import java.util.concurrent.atomic.AtomicInteger
import kafka.cluster.BrokerEndPoint
import kafka.log.LogAppendInfo
import kafka.message.NoCompressionCodec
@ -37,7 +36,7 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEnd
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import org.apache.kafka.common.requests.FetchRequest
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse}
import org.apache.kafka.common.utils.Time
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{BeforeEach, Test}
@ -907,8 +906,8 @@ class AbstractFetcherThreadTest {
partitionData: FetchData): Option[LogAppendInfo] = {
val state = replicaPartitionState(topicPartition)
if (isTruncationOnFetchSupported && partitionData.divergingEpoch.isPresent) {
val divergingEpoch = partitionData.divergingEpoch.get
if (isTruncationOnFetchSupported && FetchResponse.isDivergingEpoch(partitionData)) {
val divergingEpoch = partitionData.divergingEpoch
truncateOnFetchResponse(Map(topicPartition -> new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(Errors.NONE.code)
@ -923,7 +922,7 @@ class AbstractFetcherThreadTest {
s"fetched offset = $fetchOffset, log end offset = ${state.logEndOffset}.")
// Now check message's crc
val batches = partitionData.records.batches.asScala
val batches = FetchResponse.recordsOrFail(partitionData).batches.asScala
var maxTimestamp = RecordBatch.NO_TIMESTAMP
var offsetOfMaxTimestamp = -1L
var lastOffset = state.logEndOffset
@ -955,7 +954,7 @@ class AbstractFetcherThreadTest {
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
shallowCount = batches.size,
validBytes = partitionData.records.sizeInBytes,
validBytes = FetchResponse.recordsSize(partitionData),
offsetsMonotonic = true,
lastOffsetOfFirstBatch = batches.headOption.map(_.lastOffset).getOrElse(-1)))
}
@ -1143,9 +1142,16 @@ class AbstractFetcherThreadTest {
(Errors.NONE, records)
}
val partitionData = new FetchData()
.setPartitionIndex(partition.partition)
.setErrorCode(error.code)
.setHighWatermark(leaderState.highWatermark)
.setLastStableOffset(leaderState.highWatermark)
.setLogStartOffset(leaderState.logStartOffset)
.setRecords(records)
divergingEpoch.foreach(partitionData.setDivergingEpoch)
(partition, new FetchData(error, leaderState.highWatermark, leaderState.highWatermark, leaderState.logStartOffset,
Optional.empty[Integer], List.empty.asJava, divergingEpoch.asJava, records))
(partition, partitionData)
}.toMap
}
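A minimal sketch of the diverging-epoch pattern used in this file; the PartitionData and epoch values are illustrative, and the EpochEndOffset setter names are assumed to mirror the epoch/endOffset accessors read elsewhere in this diff:

import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.requests.FetchResponse

// Illustrative PartitionData with a diverging-epoch marker set.
val partitionData = new FetchResponseData.PartitionData()
  .setDivergingEpoch(new FetchResponseData.EpochEndOffset().setEpoch(1).setEndOffset(10L))
if (FetchResponse.isDivergingEpoch(partitionData)) {
  val divergingEpoch = partitionData.divergingEpoch // a plain field now, no Optional wrapper
  println(s"diverged at epoch ${divergingEpoch.epoch}, end offset ${divergingEpoch.endOffset}")
}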

View File

@ -23,7 +23,6 @@ import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse}
import org.apache.kafka.common.serialization.StringSerializer
import org.junit.jupiter.api.Assertions._
@ -79,8 +78,8 @@ class FetchRequestDownConversionConfigTest extends BaseRequestTest {
partitionMap
}
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse[MemoryRecords] = {
connectAndReceive[FetchResponse[MemoryRecords]](request, destination = brokerSocketServer(leaderId))
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse = {
connectAndReceive[FetchResponse](request, destination = brokerSocketServer(leaderId))
}
/**
@ -90,11 +89,11 @@ class FetchRequestDownConversionConfigTest extends BaseRequestTest {
def testV1FetchWithDownConversionDisabled(): Unit = {
val topicMap = createTopics(numTopics = 5, numPartitions = 1)
val topicPartitions = topicMap.keySet.toSeq
topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get())
topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic, "key", "value")).get())
val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(1024,
topicPartitions)).build(1)
val fetchResponse = sendFetchRequest(topicMap.head._2, fetchRequest)
topicPartitions.foreach(tp => assertEquals(Errors.UNSUPPORTED_VERSION, fetchResponse.responseData().get(tp).error))
topicPartitions.foreach(tp => assertEquals(Errors.UNSUPPORTED_VERSION, Errors.forCode(fetchResponse.responseData.get(tp).errorCode)))
}
/**
@ -104,11 +103,11 @@ class FetchRequestDownConversionConfigTest extends BaseRequestTest {
def testLatestFetchWithDownConversionDisabled(): Unit = {
val topicMap = createTopics(numTopics = 5, numPartitions = 1)
val topicPartitions = topicMap.keySet.toSeq
topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get())
topicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic, "key", "value")).get())
val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(1024,
topicPartitions)).build()
val fetchResponse = sendFetchRequest(topicMap.head._2, fetchRequest)
topicPartitions.foreach(tp => assertEquals(Errors.NONE, fetchResponse.responseData().get(tp).error))
topicPartitions.foreach(tp => assertEquals(Errors.NONE, Errors.forCode(fetchResponse.responseData.get(tp).errorCode)))
}
/**
@ -129,13 +128,13 @@ class FetchRequestDownConversionConfigTest extends BaseRequestTest {
val allTopics = conversionDisabledTopicPartitions ++ conversionEnabledTopicPartitions
val leaderId = conversionDisabledTopicsMap.head._2
allTopics.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get())
allTopics.foreach(tp => producer.send(new ProducerRecord(tp.topic, "key", "value")).get())
val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(1024,
allTopics)).build(1)
val fetchResponse = sendFetchRequest(leaderId, fetchRequest)
conversionDisabledTopicPartitions.foreach(tp => assertEquals(Errors.UNSUPPORTED_VERSION, fetchResponse.responseData().get(tp).error))
conversionEnabledTopicPartitions.foreach(tp => assertEquals(Errors.NONE, fetchResponse.responseData().get(tp).error))
conversionDisabledTopicPartitions.foreach(tp => assertEquals(Errors.UNSUPPORTED_VERSION.code, fetchResponse.responseData.get(tp).errorCode))
conversionEnabledTopicPartitions.foreach(tp => assertEquals(Errors.NONE.code, fetchResponse.responseData.get(tp).errorCode))
}
/**
@ -155,11 +154,11 @@ class FetchRequestDownConversionConfigTest extends BaseRequestTest {
val allTopicPartitions = conversionDisabledTopicPartitions ++ conversionEnabledTopicPartitions
val leaderId = conversionDisabledTopicsMap.head._2
allTopicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic(), "key", "value")).get())
allTopicPartitions.foreach(tp => producer.send(new ProducerRecord(tp.topic, "key", "value")).get())
val fetchRequest = FetchRequest.Builder.forReplica(1, 1, Int.MaxValue, 0,
createPartitionMap(1024, allTopicPartitions)).build()
val fetchResponse = sendFetchRequest(leaderId, fetchRequest)
allTopicPartitions.foreach(tp => assertEquals(Errors.NONE, fetchResponse.responseData().get(tp).error))
allTopicPartitions.foreach(tp => assertEquals(Errors.NONE.code, fetchResponse.responseData.get(tp).errorCode))
}
}
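A minimal sketch of the recurring error-code rewrite in these tests, with an illustrative PartitionData; the generated class exposes a raw short errorCode, so assertions either compare codes directly or convert back through Errors.forCode:

import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.protocol.Errors

// Illustrative PartitionData carrying only an error code.
val partitionData = new FetchResponseData.PartitionData().setErrorCode(Errors.NONE.code)
assert(partitionData.errorCode == Errors.NONE.code)            // compare raw codes
assert(Errors.forCode(partitionData.errorCode) == Errors.NONE) // or convert back to the enum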

View File

@ -17,17 +17,16 @@
package kafka.server
import java.util.{Optional, Properties}
import kafka.log.LogConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.FetchRequest.PartitionData
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import java.util.{Optional, Properties}
import scala.jdk.CollectionConverters._
/**
@ -92,8 +91,8 @@ class FetchRequestMaxBytesTest extends BaseRequestTest {
})
}
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse[MemoryRecords] = {
connectAndReceive[FetchResponse[MemoryRecords]](request, destination = brokerSocketServer(leaderId))
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse = {
connectAndReceive[FetchResponse](request, destination = brokerSocketServer(leaderId))
}
/**
@ -117,7 +116,7 @@ class FetchRequestMaxBytesTest extends BaseRequestTest {
FetchRequest.Builder.forConsumer(Int.MaxValue, 0,
Map(testTopicPartition ->
new PartitionData(fetchOffset, 0, Integer.MAX_VALUE, Optional.empty())).asJava).build(3))
val records = response.responseData().get(testTopicPartition).records.records()
val records = FetchResponse.recordsOrFail(response.responseData.get(testTopicPartition)).records()
assertNotNull(records)
val recordsList = records.asScala.toList
assertEquals(expected.size, recordsList.size)

View File

@ -16,24 +16,25 @@
*/
package kafka.server
import java.io.DataInputStream
import java.util
import java.util.{Optional, Properties}
import kafka.api.KAFKA_0_11_0_IV2
import kafka.log.LogConfig
import kafka.message.{GZIPCompressionCodec, ProducerCompressionCodec, ZStdCompressionCodec}
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.{MemoryRecords, Record, RecordBatch}
import org.apache.kafka.common.record.{Record, RecordBatch}
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
import org.apache.kafka.common.{IsolationLevel, TopicPartition}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, Test}
import scala.jdk.CollectionConverters._
import java.io.DataInputStream
import java.util
import java.util.{Optional, Properties}
import scala.collection.Seq
import scala.jdk.CollectionConverters._
import scala.util.Random
/**
@ -70,8 +71,8 @@ class FetchRequestTest extends BaseRequestTest {
partitionMap
}
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse[MemoryRecords] = {
connectAndReceive[FetchResponse[MemoryRecords]](request, destination = brokerSocketServer(leaderId))
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse = {
connectAndReceive[FetchResponse](request, destination = brokerSocketServer(leaderId))
}
private def initProducer(): Unit = {
@ -133,12 +134,12 @@ class FetchRequestTest extends BaseRequestTest {
}.sum
assertTrue(responseSize3 <= maxResponseBytes)
val partitionData3 = fetchResponse3.responseData.get(partitionWithLargeMessage1)
assertEquals(Errors.NONE, partitionData3.error)
assertEquals(Errors.NONE.code, partitionData3.errorCode)
assertTrue(partitionData3.highWatermark > 0)
val size3 = records(partitionData3).map(_.sizeInBytes).sum
assertTrue(size3 <= maxResponseBytes, s"Expected $size3 to be smaller than $maxResponseBytes")
assertTrue(size3 > maxPartitionBytes, s"Expected $size3 to be larger than $maxPartitionBytes")
assertTrue(maxPartitionBytes < partitionData3.records.sizeInBytes)
assertTrue(maxPartitionBytes < FetchResponse.recordsSize(partitionData3))
// 4. Partition with message larger than the response limit at the start of the list
val shuffledTopicPartitions4 = Seq(partitionWithLargeMessage2, partitionWithLargeMessage1) ++
@ -151,11 +152,11 @@ class FetchRequestTest extends BaseRequestTest {
}
assertEquals(Seq(partitionWithLargeMessage2), nonEmptyPartitions4)
val partitionData4 = fetchResponse4.responseData.get(partitionWithLargeMessage2)
assertEquals(Errors.NONE, partitionData4.error)
assertEquals(Errors.NONE.code, partitionData4.errorCode)
assertTrue(partitionData4.highWatermark > 0)
val size4 = records(partitionData4).map(_.sizeInBytes).sum
assertTrue(size4 > maxResponseBytes, s"Expected $size4 to be larger than $maxResponseBytes")
assertTrue(maxResponseBytes < partitionData4.records.sizeInBytes)
assertTrue(maxResponseBytes < FetchResponse.recordsSize(partitionData4))
}
@Test
@ -169,9 +170,9 @@ class FetchRequestTest extends BaseRequestTest {
Seq(topicPartition))).build(2)
val fetchResponse = sendFetchRequest(leaderId, fetchRequest)
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(Errors.NONE.code, partitionData.errorCode)
assertTrue(partitionData.highWatermark > 0)
assertEquals(maxPartitionBytes, partitionData.records.sizeInBytes)
assertEquals(maxPartitionBytes, FetchResponse.recordsSize(partitionData))
assertEquals(0, records(partitionData).map(_.sizeInBytes).sum)
}
@ -186,7 +187,7 @@ class FetchRequestTest extends BaseRequestTest {
Seq(topicPartition))).isolationLevel(IsolationLevel.READ_COMMITTED).build(4)
val fetchResponse = sendFetchRequest(leaderId, fetchRequest)
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(Errors.NONE.code, partitionData.errorCode)
assertTrue(partitionData.lastStableOffset > 0)
assertTrue(records(partitionData).map(_.sizeInBytes).sum > 0)
}
@ -209,7 +210,7 @@ class FetchRequestTest extends BaseRequestTest {
Seq(topicPartition))).build()
val fetchResponse = sendFetchRequest(nonReplicaId, fetchRequest)
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, partitionData.error)
assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, partitionData.errorCode)
}
@Test
@ -243,11 +244,11 @@ class FetchRequestTest extends BaseRequestTest {
// Validate the expected truncation
val fetchResponse = sendFetchRequest(secondLeaderId, fetchRequest)
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(0L, partitionData.records.sizeInBytes())
assertTrue(partitionData.divergingEpoch.isPresent)
assertEquals(Errors.NONE.code, partitionData.errorCode)
assertEquals(0L, FetchResponse.recordsSize(partitionData))
assertTrue(FetchResponse.isDivergingEpoch(partitionData))
val divergingEpoch = partitionData.divergingEpoch.get()
val divergingEpoch = partitionData.divergingEpoch
assertEquals(firstLeaderEpoch, divergingEpoch.epoch)
assertEquals(firstEpochEndOffset, divergingEpoch.endOffset)
}
@ -265,7 +266,7 @@ class FetchRequestTest extends BaseRequestTest {
val fetchRequest = FetchRequest.Builder.forConsumer(0, 1, partitionMap).build()
val fetchResponse = sendFetchRequest(brokerId, fetchRequest)
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(error, partitionData.error)
assertEquals(error.code, partitionData.errorCode)
}
// We need a leader change in order to check epoch fencing since the first epoch is 0 and
@ -329,7 +330,7 @@ class FetchRequestTest extends BaseRequestTest {
.build()
val fetchResponse = sendFetchRequest(destinationBrokerId, fetchRequest)
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(expectedError, partitionData.error)
assertEquals(expectedError.code, partitionData.errorCode)
}
// We only check errors because we do not expect the partition in the response otherwise
@ -366,7 +367,7 @@ class FetchRequestTest extends BaseRequestTest {
// batch is not complete, but sent when the producer is closed
futures.foreach(_.get)
def fetch(version: Short, maxPartitionBytes: Int, closeAfterPartialResponse: Boolean): Option[FetchResponse[MemoryRecords]] = {
def fetch(version: Short, maxPartitionBytes: Int, closeAfterPartialResponse: Boolean): Option[FetchResponse] = {
val fetchRequest = FetchRequest.Builder.forConsumer(Int.MaxValue, 0, createPartitionMap(maxPartitionBytes,
Seq(topicPartition))).build(version)
@ -383,7 +384,7 @@ class FetchRequestTest extends BaseRequestTest {
s"Fetch size too small $size, broker may have run out of memory")
None
} else {
Some(receive[FetchResponse[MemoryRecords]](socket, ApiKeys.FETCH, version))
Some(receive[FetchResponse](socket, ApiKeys.FETCH, version))
}
} finally {
socket.close()
@ -396,8 +397,8 @@ class FetchRequestTest extends BaseRequestTest {
val response = fetch(version, maxPartitionBytes = batchSize, closeAfterPartialResponse = false)
val fetchResponse = response.getOrElse(throw new IllegalStateException("No fetch response"))
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(Errors.NONE, partitionData.error)
val batches = partitionData.records.batches.asScala.toBuffer
assertEquals(Errors.NONE.code, partitionData.errorCode)
val batches = FetchResponse.recordsOrFail(partitionData).batches.asScala.toBuffer
assertEquals(3, batches.size) // size is 3 (not 4) since maxPartitionBytes=msgValueSize*4, excluding key and headers
}
@ -442,9 +443,9 @@ class FetchRequestTest extends BaseRequestTest {
// validate response
val partitionData = fetchResponse.responseData.get(topicPartition)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(Errors.NONE.code, partitionData.errorCode)
assertTrue(partitionData.highWatermark > 0)
val batches = partitionData.records.batches.asScala.toBuffer
val batches = FetchResponse.recordsOrFail(partitionData).batches.asScala.toBuffer
val batch = batches.head
assertEquals(expectedMagic, batch.magic)
assertEquals(currentExpectedOffset, batch.baseOffset)
@ -504,35 +505,34 @@ class FetchRequestTest extends BaseRequestTest {
assertEquals(Errors.NONE, resp1.error())
assertTrue(resp1.sessionId() > 0, "Expected the broker to create a new incremental fetch session")
debug(s"Test created an incremental fetch session ${resp1.sessionId}")
assertTrue(resp1.responseData().containsKey(foo0))
assertTrue(resp1.responseData().containsKey(foo1))
assertTrue(resp1.responseData().containsKey(bar0))
assertEquals(Errors.NONE, resp1.responseData().get(foo0).error)
assertEquals(Errors.NONE, resp1.responseData().get(foo1).error)
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, resp1.responseData().get(bar0).error)
assertTrue(resp1.responseData.containsKey(foo0))
assertTrue(resp1.responseData.containsKey(foo1))
assertTrue(resp1.responseData.containsKey(bar0))
assertEquals(Errors.NONE.code, resp1.responseData.get(foo0).errorCode)
assertEquals(Errors.NONE.code, resp1.responseData.get(foo1).errorCode)
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code, resp1.responseData.get(bar0).errorCode)
val req2 = createFetchRequest(Nil, new JFetchMetadata(resp1.sessionId(), 1), Nil)
val resp2 = sendFetchRequest(0, req2)
assertEquals(Errors.NONE, resp2.error())
assertEquals(resp1.sessionId(),
resp2.sessionId(), "Expected the broker to continue the incremental fetch session")
assertEquals(resp1.sessionId(), resp2.sessionId(), "Expected the broker to continue the incremental fetch session")
assertFalse(resp2.responseData().containsKey(foo0))
assertFalse(resp2.responseData().containsKey(foo1))
assertTrue(resp2.responseData().containsKey(bar0))
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, resp2.responseData().get(bar0).error)
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), resp2.responseData().get(bar0).errorCode())
createTopic("bar", Map(0 -> List(0, 1)))
val req3 = createFetchRequest(Nil, new JFetchMetadata(resp1.sessionId(), 2), Nil)
val resp3 = sendFetchRequest(0, req3)
assertEquals(Errors.NONE, resp3.error())
assertFalse(resp3.responseData().containsKey(foo0))
assertFalse(resp3.responseData().containsKey(foo1))
assertTrue(resp3.responseData().containsKey(bar0))
assertEquals(Errors.NONE, resp3.responseData().get(bar0).error)
assertFalse(resp3.responseData.containsKey(foo0))
assertFalse(resp3.responseData.containsKey(foo1))
assertTrue(resp3.responseData.containsKey(bar0))
assertEquals(Errors.NONE.code, resp3.responseData.get(bar0).errorCode)
val req4 = createFetchRequest(Nil, new JFetchMetadata(resp1.sessionId(), 3), Nil)
val resp4 = sendFetchRequest(0, req4)
assertEquals(Errors.NONE, resp4.error())
assertFalse(resp4.responseData().containsKey(foo0))
assertFalse(resp4.responseData().containsKey(foo1))
assertFalse(resp4.responseData().containsKey(bar0))
assertFalse(resp4.responseData.containsKey(foo0))
assertFalse(resp4.responseData.containsKey(foo1))
assertFalse(resp4.responseData.containsKey(bar0))
}
@Test
@ -560,7 +560,7 @@ class FetchRequestTest extends BaseRequestTest {
val res0 = sendFetchRequest(leaderId, req0)
val data0 = res0.responseData.get(topicPartition)
assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE, data0.error)
assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE.code, data0.errorCode)
// fetch request with version 10: works fine!
val req1= new FetchRequest.Builder(0, 10, -1, Int.MaxValue, 0,
@ -568,14 +568,14 @@ class FetchRequestTest extends BaseRequestTest {
.setMaxBytes(800).build()
val res1 = sendFetchRequest(leaderId, req1)
val data1 = res1.responseData.get(topicPartition)
assertEquals(Errors.NONE, data1.error)
assertEquals(Errors.NONE.code, data1.errorCode)
assertEquals(3, records(data1).size)
}
@Test
def testPartitionDataEquals(): Unit = {
assertEquals(new FetchRequest.PartitionData(300, 0L, 300, Optional.of(300)),
new FetchRequest.PartitionData(300, 0L, 300, Optional.of(300)));
new FetchRequest.PartitionData(300, 0L, 300, Optional.of(300)))
}
@Test
@ -614,7 +614,7 @@ class FetchRequestTest extends BaseRequestTest {
val res0 = sendFetchRequest(leaderId, req0)
val data0 = res0.responseData.get(topicPartition)
assertEquals(Errors.NONE, data0.error)
assertEquals(Errors.NONE.code, data0.errorCode)
assertEquals(1, records(data0).size)
val req1 = new FetchRequest.Builder(0, 1, -1, Int.MaxValue, 0,
@ -623,7 +623,7 @@ class FetchRequestTest extends BaseRequestTest {
val res1 = sendFetchRequest(leaderId, req1)
val data1 = res1.responseData.get(topicPartition)
assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE, data1.error)
assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE.code, data1.errorCode)
// fetch request with fetch version v3 (magic 1):
// gzip compressed record is returned with down-conversion.
@ -634,7 +634,7 @@ class FetchRequestTest extends BaseRequestTest {
val res2 = sendFetchRequest(leaderId, req2)
val data2 = res2.responseData.get(topicPartition)
assertEquals(Errors.NONE, data2.error)
assertEquals(Errors.NONE.code, data2.errorCode)
assertEquals(1, records(data2).size)
val req3 = new FetchRequest.Builder(0, 1, -1, Int.MaxValue, 0,
@ -643,7 +643,7 @@ class FetchRequestTest extends BaseRequestTest {
val res3 = sendFetchRequest(leaderId, req3)
val data3 = res3.responseData.get(topicPartition)
assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE, data3.error)
assertEquals(Errors.UNSUPPORTED_COMPRESSION_TYPE.code, data3.errorCode)
// fetch request with version 10: works fine!
val req4= new FetchRequest.Builder(0, 10, -1, Int.MaxValue, 0,
@ -651,15 +651,15 @@ class FetchRequestTest extends BaseRequestTest {
.setMaxBytes(800).build()
val res4 = sendFetchRequest(leaderId, req4)
val data4 = res4.responseData.get(topicPartition)
assertEquals(Errors.NONE, data4.error)
assertEquals(Errors.NONE.code, data4.errorCode)
assertEquals(3, records(data4).size)
}
private def records(partitionData: FetchResponse.PartitionData[MemoryRecords]): Seq[Record] = {
partitionData.records.records.asScala.toBuffer
private def records(partitionData: FetchResponseData.PartitionData): Seq[Record] = {
FetchResponse.recordsOrFail(partitionData).records.asScala.toBuffer
}
private def checkFetchResponse(expectedPartitions: Seq[TopicPartition], fetchResponse: FetchResponse[MemoryRecords],
private def checkFetchResponse(expectedPartitions: Seq[TopicPartition], fetchResponse: FetchResponse,
maxPartitionBytes: Int, maxResponseBytes: Int, numMessagesPerPartition: Int): Unit = {
assertEquals(expectedPartitions, fetchResponse.responseData.keySet.asScala.toSeq)
var emptyResponseSeen = false
@ -668,10 +668,10 @@ class FetchRequestTest extends BaseRequestTest {
expectedPartitions.foreach { tp =>
val partitionData = fetchResponse.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(Errors.NONE.code, partitionData.errorCode)
assertTrue(partitionData.highWatermark > 0)
val records = partitionData.records
val records = FetchResponse.recordsOrFail(partitionData)
responseBufferSize += records.sizeInBytes
val batches = records.batches.asScala.toBuffer

View File

@ -16,26 +16,26 @@
*/
package kafka.server
import java.util
import java.util.{Collections, Optional}
import kafka.utils.MockTime
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.Records
import org.apache.kafka.common.requests.FetchMetadata.{FINAL_EPOCH, INVALID_SESSION_ID}
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.requests.{FetchRequest, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{Test, Timeout}
import java.util
import java.util.{Collections, Optional}
@Timeout(120)
class FetchSessionTest {
@Test
def testNewSessionId(): Unit = {
val cache = new FetchSessionCache(3, 100)
for (i <- 0 to 10000) {
for (_ <- 0 to 10000) {
val id = cache.newSessionId()
assertTrue(id > 0)
}
@ -125,7 +125,7 @@ class FetchSessionTest {
assertEquals(3, cache.totalPartitions)
}
val EMPTY_PART_LIST = Collections.unmodifiableList(new util.ArrayList[TopicPartition]())
private val EMPTY_PART_LIST = Collections.unmodifiableList(new util.ArrayList[TopicPartition]())
@Test
@ -155,13 +155,22 @@ class FetchSessionTest {
assertEquals(Optional.of(1), epochs1(tp1))
assertEquals(Optional.of(2), epochs1(tp2))
val response = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
response.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100, 100,
100, null, null))
response.put(tp1, new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
response.put(tp2, new FetchResponse.PartitionData(
Errors.NONE, 5, 5, 5, null, null))
val response = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
response.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
response.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
response.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(5)
.setLastStableOffset(5)
.setLogStartOffset(5))
val sessionId = context1.updateAndGenerateResponseData(response).sessionId()
@ -220,10 +229,22 @@ class FetchSessionTest {
assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.empty, tp2 -> Optional.of(1)),
cachedLastFetchedEpochs(context1))
val response = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
response.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100, 100, 100, null, null))
response.put(tp1, new FetchResponse.PartitionData(Errors.NONE, 10, 10, 10, null, null))
response.put(tp2, new FetchResponse.PartitionData(Errors.NONE, 5, 5, 5, null, null))
val response = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
response.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
response.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
response.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(5)
.setLastStableOffset(5)
.setLogStartOffset(5))
val sessionId = context1.updateAndGenerateResponseData(response).sessionId()
@ -275,15 +296,23 @@ class FetchSessionTest {
})
assertEquals(0, context2.getFetchOffset(new TopicPartition("foo", 0)).get)
assertEquals(10, context2.getFetchOffset(new TopicPartition("foo", 1)).get)
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData2.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData2.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData2.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData2.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp2.error())
assertTrue(resp2.sessionId() != INVALID_SESSION_ID)
assertEquals(respData2, resp2.responseData())
assertEquals(respData2, resp2.responseData)
// Test trying to create a new session with an invalid epoch
val context3 = fetchManager.newContext(
@ -314,7 +343,7 @@ class FetchSessionTest {
val resp5 = context5.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp5.error())
assertEquals(resp2.sessionId(), resp5.sessionId())
assertEquals(0, resp5.responseData().size())
assertEquals(0, resp5.responseData.size())
// Test setting an invalid fetch session epoch.
val context6 = fetchManager.newContext(
@ -345,11 +374,19 @@ class FetchSessionTest {
new JFetchMetadata(prevSessionId, FINAL_EPOCH), reqData8, EMPTY_PART_LIST, false)
assertEquals(classOf[SessionlessFetchContext], context8.getClass)
assertEquals(0, cache.size)
val respData8 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
val respData8 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData8.put(new TopicPartition("bar", 0),
new FetchResponse.PartitionData(Errors.NONE, 100, 100, 100, null, null))
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData8.put(new TopicPartition("bar", 1),
new FetchResponse.PartitionData(Errors.NONE, 100, 100, 100, null, null))
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
val resp8 = context8.updateAndGenerateResponseData(respData8)
assertEquals(Errors.NONE, resp8.error)
nextSessionId = resp8.sessionId
@ -370,15 +407,21 @@ class FetchSessionTest {
Optional.empty()))
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData1, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData1.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData1.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp1 = context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, resp1.error())
assertTrue(resp1.sessionId() != INVALID_SESSION_ID)
assertEquals(2, resp1.responseData().size())
assertEquals(2, resp1.responseData.size())
// Create an incremental fetch request that removes foo-0 and adds bar-0
val reqData2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
@ -391,18 +434,26 @@ class FetchSessionTest {
assertEquals(classOf[IncrementalFetchContext], context2.getClass)
val parts2 = Set(new TopicPartition("foo", 1), new TopicPartition("bar", 0))
val reqData2Iter = parts2.iterator
context2.foreachPartition((topicPart, data) => {
context2.foreachPartition((topicPart, _) => {
assertEquals(reqData2Iter.next(), topicPart)
})
assertEquals(None, context2.getFetchOffset(new TopicPartition("foo", 0)))
assertEquals(10, context2.getFetchOffset(new TopicPartition("foo", 1)).get)
assertEquals(15, context2.getFetchOffset(new TopicPartition("bar", 0)).get)
assertEquals(None, context2.getFetchOffset(new TopicPartition("bar", 2)))
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData2.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
respData2.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData2.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
respData2.put(new TopicPartition("bar", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp2.error)
assertEquals(1, resp2.responseData.size)
@ -424,15 +475,21 @@ class FetchSessionTest {
Optional.empty()))
val session1context1 = fetchManager.newContext(JFetchMetadata.INITIAL, session1req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session1context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData1.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData1.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session1resp = session1context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, session1resp.error())
assertTrue(session1resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session1resp.responseData().size())
assertEquals(2, session1resp.responseData.size)
// check session entered into case
assertTrue(cache.get(session1resp.sessionId()).isDefined)
@ -446,15 +503,22 @@ class FetchSessionTest {
Optional.empty()))
val session2context = fetchManager.newContext(JFetchMetadata.INITIAL, session1req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session2context.getClass)
val session2RespData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
session2RespData.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
session2RespData.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val session2RespData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
session2RespData.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
session2RespData.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session2resp = session2context.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, session2resp.error())
assertTrue(session2resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session2resp.responseData().size())
assertEquals(2, session2resp.responseData.size)
// both newly created entries are present in cache
assertTrue(cache.get(session1resp.sessionId()).isDefined)
@ -481,19 +545,25 @@ class FetchSessionTest {
Optional.empty()))
val session3context = fetchManager.newContext(JFetchMetadata.INITIAL, session3req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session3context.getClass)
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData3.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData3.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData3.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData3.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session3resp = session3context.updateAndGenerateResponseData(respData3)
assertEquals(Errors.NONE, session3resp.error())
assertTrue(session3resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session3resp.responseData().size())
assertEquals(2, session3resp.responseData.size)
assertTrue(cache.get(session1resp.sessionId()).isDefined)
assertFalse(cache.get(session2resp.sessionId()).isDefined,
"session 2 should have been evicted by latest session, as session 1 was used more recently")
assertFalse(cache.get(session2resp.sessionId()).isDefined, "session 2 should have been evicted by latest session, as session 1 was used more recently")
assertTrue(cache.get(session3resp.sessionId()).isDefined)
}
@ -512,15 +582,21 @@ class FetchSessionTest {
Optional.empty()))
val session1context = fetchManager.newContext(JFetchMetadata.INITIAL, session1req, EMPTY_PART_LIST, true)
assertEquals(classOf[FullFetchContext], session1context.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData1.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData1.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session1resp = session1context.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, session1resp.error())
assertTrue(session1resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session1resp.responseData().size())
assertEquals(2, session1resp.responseData.size)
assertEquals(1, cache.size)
// move time forward to age session 1 a little compared to session 2
@ -534,15 +610,23 @@ class FetchSessionTest {
Optional.empty()))
val session2context = fetchManager.newContext(JFetchMetadata.INITIAL, session1req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session2context.getClass)
val session2RespData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
session2RespData.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
session2RespData.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val session2RespData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
session2RespData.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
session2RespData.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session2resp = session2context.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, session2resp.error())
assertTrue(session2resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session2resp.responseData().size())
assertEquals(2, session2resp.responseData.size)
// both newly created entries are present in cache
assertTrue(cache.get(session1resp.sessionId()).isDefined)
@ -558,21 +642,28 @@ class FetchSessionTest {
Optional.empty()))
val session3context = fetchManager.newContext(JFetchMetadata.INITIAL, session3req, EMPTY_PART_LIST, true)
assertEquals(classOf[FullFetchContext], session3context.getClass)
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData3.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData3.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData3.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData3.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session3resp = session3context.updateAndGenerateResponseData(respData3)
assertEquals(Errors.NONE, session3resp.error())
assertTrue(session3resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session3resp.responseData().size())
assertEquals(2, session3resp.responseData.size)
assertTrue(cache.get(session1resp.sessionId()).isDefined)
// even though session 2 is more recent than session 1, and has not reached expiry time, it is less
// privileged than session 1, and thus session 3 should be entered and session 2 evicted.
assertFalse(cache.get(session2resp.sessionId()).isDefined,
"session 2 should have been evicted by session 3")
assertFalse(cache.get(session2resp.sessionId()).isDefined, "session 2 should have been evicted by session 3")
assertTrue(cache.get(session3resp.sessionId()).isDefined)
assertEquals(2, cache.size)
@ -586,18 +677,25 @@ class FetchSessionTest {
Optional.empty()))
val session4context = fetchManager.newContext(JFetchMetadata.INITIAL, session4req, EMPTY_PART_LIST, true)
assertEquals(classOf[FullFetchContext], session4context.getClass)
val respData4 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData4.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData4.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData4 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData4.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData4.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session4resp = session3context.updateAndGenerateResponseData(respData4)
assertEquals(Errors.NONE, session4resp.error())
assertTrue(session4resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session4resp.responseData().size())
assertEquals(2, session4resp.responseData.size)
assertFalse(cache.get(session1resp.sessionId()).isDefined,
"session 1 should have been evicted by session 4 even though it is privileged as it has hit eviction time")
assertFalse(cache.get(session1resp.sessionId()).isDefined, "session 1 should have been evicted by session 4 even though it is privileged as it has hit eviction time")
assertTrue(cache.get(session3resp.sessionId()).isDefined)
assertTrue(cache.get(session4resp.sessionId()).isDefined)
assertEquals(2, cache.size)
@ -617,11 +715,17 @@ class FetchSessionTest {
Optional.empty()))
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData1, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData1.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(
Errors.NONE, 100, 100, 100, null, null))
respData1.put(new TopicPartition("foo", 1), new FetchResponse.PartitionData(
Errors.NONE, 10, 10, 10, null, null))
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp1 = context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, resp1.error)
assertTrue(resp1.sessionId() != INVALID_SESSION_ID)
@ -636,10 +740,10 @@ class FetchSessionTest {
val context2 = fetchManager.newContext(
new JFetchMetadata(resp1.sessionId, 1), reqData2, removed2, false)
assertEquals(classOf[SessionlessFetchContext], context2.getClass)
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(INVALID_SESSION_ID, resp2.sessionId)
assertTrue(resp2.responseData().isEmpty)
assertTrue(resp2.responseData.isEmpty)
assertEquals(0, cache.size)
}
@ -658,12 +762,19 @@ class FetchSessionTest {
// Full fetch context returns all partitions in the response
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData, EMPTY_PART_LIST, isFollower = false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
respData.put(tp1, new FetchResponse.PartitionData(Errors.NONE,
105, 105, 0, Optional.empty(), Collections.emptyList(), Optional.empty(), null))
val divergingEpoch = Optional.of(new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(90))
respData.put(tp2, new FetchResponse.PartitionData(Errors.NONE,
105, 105, 0, Optional.empty(), Collections.emptyList(), divergingEpoch, null))
val respData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0))
val divergingEpoch = new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(90)
respData.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0)
.setDivergingEpoch(divergingEpoch))
val resp1 = context1.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp1.error)
assertNotEquals(INVALID_SESSION_ID, resp1.sessionId)
@ -679,8 +790,12 @@ class FetchSessionTest {
assertEquals(Collections.singleton(tp2), resp2.responseData.keySet)
// All partitions with divergent epoch should be returned.
respData.put(tp1, new FetchResponse.PartitionData(Errors.NONE,
105, 105, 0, Optional.empty(), Collections.emptyList(), divergingEpoch, null))
respData.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0)
.setDivergingEpoch(divergingEpoch))
val resp3 = context2.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp3.error)
assertEquals(resp1.sessionId, resp3.sessionId)
@ -688,8 +803,11 @@ class FetchSessionTest {
// Partitions that meet other conditions should be returned regardless of whether
// divergingEpoch is set or not.
respData.put(tp1, new FetchResponse.PartitionData(Errors.NONE,
110, 110, 0, Optional.empty(), Collections.emptyList(), Optional.empty(), null))
respData.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(110)
.setLastStableOffset(110)
.setLogStartOffset(0))
val resp4 = context2.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp4.error)
assertEquals(resp1.sessionId, resp4.sessionId)
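The pattern exercised throughout the FetchSessionTest changes above is the one the rest of this diff standardizes on: build each partition entry with the generated FetchResponseData.PartitionData setters, then assemble the response through the FetchResponse.of factory instead of the removed constructors. A minimal Java sketch of that usage follows; it is an illustration, not part of the diff, and the literal values are arbitrary.

    LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    responseData.put(new TopicPartition("foo", 0),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(100)
            .setLastStableOffset(100)
            .setLogStartOffset(0));
    // throttleTimeMs = 0 and sessionId = 123 are illustrative values only
    FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123, responseData);

Fields left unset keep the generated defaults, which is why the tests above only populate the fields they assert on.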


@ -1072,7 +1072,7 @@ class KafkaApisTest {
val response = capturedResponse.getValue.asInstanceOf[OffsetCommitResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION,
Errors.forCode(response.data().topics().get(0).partitions().get(0).errorCode()))
Errors.forCode(response.data.topics().get(0).partitions().get(0).errorCode))
}
checkInvalidPartition(-1)
@ -1425,9 +1425,9 @@ class KafkaApisTest {
val produceRequest = ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection(
Collections.singletonList(new ProduceRequestData.TopicProduceData()
.setName(tp.topic()).setPartitionData(Collections.singletonList(
.setName(tp.topic).setPartitionData(Collections.singletonList(
new ProduceRequestData.PartitionProduceData()
.setIndex(tp.partition())
.setIndex(tp.partition)
.setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("test".getBytes))))))
.iterator))
.setAcks(1.toShort)
@ -1632,21 +1632,21 @@ class KafkaApisTest {
val topicStates = Seq(
new StopReplicaTopicState()
.setTopicName(groupMetadataPartition.topic())
.setTopicName(groupMetadataPartition.topic)
.setPartitionStates(Seq(new StopReplicaPartitionState()
.setPartitionIndex(groupMetadataPartition.partition())
.setPartitionIndex(groupMetadataPartition.partition)
.setLeaderEpoch(leaderEpoch)
.setDeletePartition(deletePartition)).asJava),
new StopReplicaTopicState()
.setTopicName(txnStatePartition.topic())
.setTopicName(txnStatePartition.topic)
.setPartitionStates(Seq(new StopReplicaPartitionState()
.setPartitionIndex(txnStatePartition.partition())
.setPartitionIndex(txnStatePartition.partition)
.setLeaderEpoch(leaderEpoch)
.setDeletePartition(deletePartition)).asJava),
new StopReplicaTopicState()
.setTopicName(fooPartition.topic())
.setTopicName(fooPartition.topic)
.setPartitionStates(Seq(new StopReplicaPartitionState()
.setPartitionIndex(fooPartition.partition())
.setPartitionIndex(fooPartition.partition)
.setLeaderEpoch(leaderEpoch)
.setDeletePartition(deletePartition)).asJava)
).asJava
@ -1806,8 +1806,8 @@ class KafkaApisTest {
val response = capturedResponse.getValue.asInstanceOf[DescribeGroupsResponse]
val group = response.data().groups().get(0)
assertEquals(Errors.NONE, Errors.forCode(group.errorCode()))
val group = response.data.groups().get(0)
assertEquals(Errors.NONE, Errors.forCode(group.errorCode))
assertEquals(groupId, group.groupId())
assertEquals(groupSummary.state, group.groupState())
assertEquals(groupSummary.protocolType, group.protocolType())
@ -1873,7 +1873,7 @@ class KafkaApisTest {
val response = capturedResponse.getValue.asInstanceOf[OffsetDeleteResponse]
def errorForPartition(topic: String, partition: Int): Errors = {
Errors.forCode(response.data.topics.find(topic).partitions.find(partition).errorCode())
Errors.forCode(response.data.topics.find(topic).partitions.find(partition).errorCode)
}
assertEquals(2, response.data.topics.size)
@ -1914,7 +1914,7 @@ class KafkaApisTest {
val response = capturedResponse.getValue.asInstanceOf[OffsetDeleteResponse]
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION,
Errors.forCode(response.data.topics.find(topic).partitions.find(invalidPartitionId).errorCode()))
Errors.forCode(response.data.topics.find(topic).partitions.find(invalidPartitionId).errorCode))
}
checkInvalidPartition(-1)
@ -1942,7 +1942,7 @@ class KafkaApisTest {
val response = capturedResponse.getValue.asInstanceOf[OffsetDeleteResponse]
assertEquals(Errors.GROUP_ID_NOT_FOUND, Errors.forCode(response.data.errorCode()))
assertEquals(Errors.GROUP_ID_NOT_FOUND, Errors.forCode(response.data.errorCode))
}
private def testListOffsetFailedGetLeaderReplica(error: Errors): Unit = {
@ -2130,16 +2130,15 @@ class KafkaApisTest {
EasyMock.replay(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, fetchManager)
createKafkaApis().handleFetchRequest(request)
val response = capturedResponse.getValue.asInstanceOf[FetchResponse[BaseRecords]]
val response = capturedResponse.getValue.asInstanceOf[FetchResponse]
assertTrue(response.responseData.containsKey(tp))
val partitionData = response.responseData.get(tp)
assertEquals(Errors.NONE, partitionData.error)
assertEquals(Errors.NONE.code, partitionData.errorCode)
assertEquals(hw, partitionData.highWatermark)
assertEquals(-1, partitionData.lastStableOffset)
assertEquals(0, partitionData.logStartOffset)
assertEquals(timestamp,
partitionData.records.asInstanceOf[MemoryRecords].batches.iterator.next.maxTimestamp)
assertEquals(timestamp, FetchResponse.recordsOrFail(partitionData).batches.iterator.next.maxTimestamp)
assertNull(partitionData.abortedTransactions)
}
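The FetchResponse.recordsOrFail and FetchResponse.recordsSize helpers used here replace the inline asInstanceOf casts visible in the removed lines. A hedged sketch of reading records back out of a partition entry, assuming tp and response are already in scope and the usual record/request imports:

    FetchResponseData.PartitionData partitionData = response.responseData().get(tp);
    if (FetchResponse.recordsSize(partitionData) > 0) {
        // recordsOrFail performs the cast to Records that tests used to spell out with asInstanceOf
        Records records = FetchResponse.recordsOrFail(partitionData);
        records.batches().forEach(batch -> System.out.println(batch.maxTimestamp()));
    }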
@ -2563,7 +2562,7 @@ class KafkaApisTest {
.setPartitions(Collections.singletonList(
new OffsetCommitResponseData.OffsetCommitResponsePartition()
.setPartitionIndex(0)
.setErrorCode(Errors.UNSUPPORTED_VERSION.code())
.setErrorCode(Errors.UNSUPPORTED_VERSION.code)
))
)
val response = capturedResponse.getValue.asInstanceOf[OffsetCommitResponse]
@ -2871,9 +2870,9 @@ class KafkaApisTest {
val fooPartition = new TopicPartition("foo", 0)
val topicStates = Seq(
new StopReplicaTopicState()
.setTopicName(fooPartition.topic())
.setTopicName(fooPartition.topic)
.setPartitionStates(Seq(new StopReplicaPartitionState()
.setPartitionIndex(fooPartition.partition())
.setPartitionIndex(fooPartition.partition)
.setLeaderEpoch(1)
.setDeletePartition(false)).asJava)
).asJava
@ -3246,15 +3245,18 @@ class KafkaApisTest {
@Test
def testSizeOfThrottledPartitions(): Unit = {
def fetchResponse(data: Map[TopicPartition, String]): FetchResponse[Records] = {
val responseData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]](
def fetchResponse(data: Map[TopicPartition, String]): FetchResponse = {
val responseData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData](
data.map { case (tp, raw) =>
tp -> new FetchResponse.PartitionData(Errors.NONE,
105, 105, 0, Optional.empty(), Collections.emptyList(), Optional.empty(),
MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord(100, raw.getBytes(StandardCharsets.UTF_8))).asInstanceOf[Records])
tp -> new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0)
.setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord(100, raw.getBytes(StandardCharsets.UTF_8))))
}.toMap.asJava)
new FetchResponse(Errors.NONE, responseData, 100, 100)
FetchResponse.of(Errors.NONE, 100, 100, responseData)
}
val throttledPartition = new TopicPartition("throttledData", 0)


@ -17,26 +17,22 @@
package kafka.server
import java.io.File
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Optional, Properties, Random}
import kafka.log.{ClientRecordDeletion, Log, LogSegment}
import kafka.utils.{MockTime, TestUtils}
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse
import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic}
import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse}
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffsetsRequest, ListOffsetsResponse}
import org.apache.kafka.common.{IsolationLevel, TopicPartition}
import org.easymock.{EasyMock, IAnswer}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import scala.jdk.CollectionConverters._
import java.io.File
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Optional, Properties, Random}
import scala.collection.mutable.Buffer
import scala.jdk.CollectionConverters._
class LogOffsetTest extends BaseRequestTest {
@ -127,7 +123,7 @@ class LogOffsetTest extends BaseRequestTest {
Map(topicPartition -> new FetchRequest.PartitionData(consumerOffsets.head, FetchRequest.INVALID_LOG_START_OFFSET,
300 * 1024, Optional.empty())).asJava).build()
val fetchResponse = sendFetchRequest(fetchRequest)
assertFalse(fetchResponse.responseData.get(topicPartition).records.batches.iterator.hasNext)
assertFalse(FetchResponse.recordsOrFail(fetchResponse.responseData.get(topicPartition)).batches.iterator.hasNext)
}
@Test
@ -251,8 +247,8 @@ class LogOffsetTest extends BaseRequestTest {
connectAndReceive[ListOffsetsResponse](request)
}
private def sendFetchRequest(request: FetchRequest): FetchResponse[MemoryRecords] = {
connectAndReceive[FetchResponse[MemoryRecords]](request)
private def sendFetchRequest(request: FetchRequest): FetchResponse = {
connectAndReceive[FetchResponse](request)
}
private def buildTargetTimes(tp: TopicPartition, timestamp: Long, maxNumOffsets: Int): List[ListOffsetsTopic] = {


@ -16,9 +16,6 @@
*/
package kafka.server
import java.nio.charset.StandardCharsets
import java.util.{Collections, Optional}
import kafka.api.{ApiVersion, KAFKA_2_6_IV0}
import kafka.cluster.{BrokerEndPoint, Partition}
import kafka.log.{Log, LogAppendInfo, LogManager}
@ -32,17 +29,18 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEnd
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors._
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, Records, SimpleRecord}
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, SimpleRecord}
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import org.apache.kafka.common.requests.FetchResponse
import org.apache.kafka.common.utils.SystemTime
import org.easymock.EasyMock._
import org.easymock.{Capture, CaptureType}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, Test}
import scala.jdk.CollectionConverters._
import java.nio.charset.StandardCharsets
import java.util.Collections
import scala.collection.{Map, mutable}
import scala.jdk.CollectionConverters._
class ReplicaFetcherThreadTest {
@ -531,16 +529,18 @@ class ReplicaFetcherThreadTest {
assertEquals(1, mockNetwork.fetchCount)
partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) }
def partitionData(divergingEpoch: FetchResponseData.EpochEndOffset): FetchResponse.PartitionData[Records] = {
new FetchResponse.PartitionData[Records](
Errors.NONE, 0, 0, 0, Optional.empty(), Collections.emptyList(),
Optional.of(divergingEpoch), MemoryRecords.EMPTY)
def partitionData(partition: Int, divergingEpoch: FetchResponseData.EpochEndOffset): FetchResponseData.PartitionData = {
new FetchResponseData.PartitionData()
.setPartitionIndex(partition)
.setLastStableOffset(0)
.setLogStartOffset(0)
.setDivergingEpoch(divergingEpoch)
}
// Loop 2 should truncate based on diverging epoch and continue to send fetch requests.
mockNetwork.setFetchPartitionDataForNextResponse(Map(
t1p0 -> partitionData(new FetchResponseData.EpochEndOffset().setEpoch(4).setEndOffset(140)),
t1p1 -> partitionData(new FetchResponseData.EpochEndOffset().setEpoch(4).setEndOffset(141))
t1p0 -> partitionData(t1p0.partition, new FetchResponseData.EpochEndOffset().setEpoch(4).setEndOffset(140)),
t1p1 -> partitionData(t1p1.partition, new FetchResponseData.EpochEndOffset().setEpoch(4).setEndOffset(141))
))
latestLogEpoch = Some(4)
thread.doWork()
@ -555,8 +555,8 @@ class ReplicaFetcherThreadTest {
// Loop 3 should truncate because of diverging epoch. Offset truncation is not complete
// because divergent epoch is not known to follower. We truncate and stay in Fetching state.
mockNetwork.setFetchPartitionDataForNextResponse(Map(
t1p0 -> partitionData(new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(130)),
t1p1 -> partitionData(new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(131))
t1p0 -> partitionData(t1p0.partition, new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(130)),
t1p1 -> partitionData(t1p1.partition, new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(131))
))
thread.doWork()
assertEquals(0, mockNetwork.epochFetchCount)
@ -569,8 +569,8 @@ class ReplicaFetcherThreadTest {
// because divergent epoch is not known to follower. Last fetched epoch cannot be determined
// from the log. We truncate and stay in Fetching state.
mockNetwork.setFetchPartitionDataForNextResponse(Map(
t1p0 -> partitionData(new FetchResponseData.EpochEndOffset().setEpoch(2).setEndOffset(120)),
t1p1 -> partitionData(new FetchResponseData.EpochEndOffset().setEpoch(2).setEndOffset(121))
t1p0 -> partitionData(t1p0.partition, new FetchResponseData.EpochEndOffset().setEpoch(2).setEndOffset(120)),
t1p1 -> partitionData(t1p1.partition, new FetchResponseData.EpochEndOffset().setEpoch(2).setEndOffset(121))
))
latestLogEpoch = None
thread.doWork()
@ -963,9 +963,11 @@ class ReplicaFetcherThreadTest {
val records = MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord(1000, "foo".getBytes(StandardCharsets.UTF_8)))
val partitionData: thread.FetchData = new FetchResponse.PartitionData[Records](
Errors.NONE, 0, 0, 0, Optional.empty(), Collections.emptyList(), records)
val partitionData: thread.FetchData = new FetchResponseData.PartitionData()
.setPartitionIndex(t1p0.partition)
.setLastStableOffset(0)
.setLogStartOffset(0)
.setRecords(records)
thread.processPartitionData(t1p0, 0, partitionData)
if (isReassigning)


@ -17,24 +17,17 @@
package kafka.server
import java.io.File
import java.net.InetAddress
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.{Collections, Optional, Properties}
import kafka.api._
import kafka.log.{AppendOrigin, Log, LogConfig, LogManager, ProducerStateManager}
import kafka.cluster.{BrokerEndPoint, Partition}
import kafka.log.LeaderOffsetIncremented
import kafka.log._
import kafka.server.QuotaFactory.{QuotaManagers, UnboundedQuota}
import kafka.server.checkpoints.LazyOffsetCheckpoints
import kafka.server.checkpoints.OffsetCheckpointFile
import kafka.server.checkpoints.{LazyOffsetCheckpoints, OffsetCheckpointFile}
import kafka.server.epoch.util.ReplicaFetcherMockBlockingSend
import kafka.server.metadata.CachedConfigRepository
import kafka.utils.TestUtils.createBroker
import kafka.utils.timer.MockTimer
import kafka.utils.{MockScheduler, MockTime, TestUtils}
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.message.LeaderAndIsrRequestData
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
@ -45,21 +38,23 @@ import org.apache.kafka.common.record._
import org.apache.kafka.common.replica.ClientMetadata
import org.apache.kafka.common.replica.ClientMetadata.DefaultClientMetadata
import org.apache.kafka.common.requests.FetchRequest.PartitionData
import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.utils.{Time, Utils}
import org.apache.kafka.common.{IsolationLevel, Node, TopicPartition, Uuid}
import org.easymock.EasyMock
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import org.mockito.Mockito
import scala.collection.mutable
import java.io.File
import java.net.InetAddress
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.{Collections, Optional, Properties}
import scala.collection.{Map, Seq, mutable}
import scala.jdk.CollectionConverters._
import scala.collection.{Map, Seq}
class ReplicaManagerTest {
@ -403,7 +398,7 @@ class ReplicaManagerTest {
assertEquals(Errors.NONE, fetchData.error)
assertTrue(fetchData.records.batches.asScala.isEmpty)
assertEquals(Some(0), fetchData.lastStableOffset)
assertEquals(Some(List.empty[AbortedTransaction]), fetchData.abortedTransactions)
assertEquals(Some(List.empty[FetchResponseData.AbortedTransaction]), fetchData.abortedTransactions)
// delayed fetch should timeout and return nothing
consumerFetchResult = fetchAsConsumer(replicaManager, new TopicPartition(topic, 0),
@ -416,7 +411,7 @@ class ReplicaManagerTest {
assertEquals(Errors.NONE, fetchData.error)
assertTrue(fetchData.records.batches.asScala.isEmpty)
assertEquals(Some(0), fetchData.lastStableOffset)
assertEquals(Some(List.empty[AbortedTransaction]), fetchData.abortedTransactions)
assertEquals(Some(List.empty[FetchResponseData.AbortedTransaction]), fetchData.abortedTransactions)
// now commit the transaction
val endTxnMarker = new EndTransactionMarker(ControlRecordType.COMMIT, 0)
@ -448,7 +443,7 @@ class ReplicaManagerTest {
fetchData = consumerFetchResult.assertFired
assertEquals(Errors.NONE, fetchData.error)
assertEquals(Some(numRecords + 1), fetchData.lastStableOffset)
assertEquals(Some(List.empty[AbortedTransaction]), fetchData.abortedTransactions)
assertEquals(Some(List.empty[FetchResponseData.AbortedTransaction]), fetchData.abortedTransactions)
assertEquals(numRecords + 1, fetchData.records.batches.asScala.size)
} finally {
replicaManager.shutdown(checkpointHW = false)


@ -193,7 +193,7 @@ class UpdateFeaturesTest extends BaseRequestTest {
new UpdateFeaturesRequest.Builder(new UpdateFeaturesRequestData().setFeatureUpdates(validUpdates)).build(),
notControllerSocketServer)
assertEquals(Errors.NOT_CONTROLLER, Errors.forCode(response.data.errorCode()))
assertEquals(Errors.NOT_CONTROLLER, Errors.forCode(response.data.errorCode))
assertNotNull(response.data.errorMessage())
assertEquals(0, response.data.results.size)
checkFeatures(


@ -18,14 +18,12 @@ package kafka.server.epoch.util
import java.net.SocketTimeoutException
import java.util
import kafka.cluster.BrokerEndPoint
import kafka.server.BlockingSend
import org.apache.kafka.clients.{ClientRequest, ClientResponse, MockClient, NetworkClientUtils}
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{OffsetForLeaderTopicResult, EpochEndOffset}
import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochResponseData}
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.Records
import org.apache.kafka.common.requests.AbstractRequest.Builder
import org.apache.kafka.common.requests.{AbstractRequest, FetchResponse, OffsetsForLeaderEpochResponse, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.utils.{SystemTime, Time}
@ -52,7 +50,7 @@ class ReplicaFetcherMockBlockingSend(offsets: java.util.Map[TopicPartition, Epoc
var lastUsedOffsetForLeaderEpochVersion = -1
var callback: Option[() => Unit] = None
var currentOffsets: util.Map[TopicPartition, EpochEndOffset] = offsets
var fetchPartitionData: Map[TopicPartition, FetchResponse.PartitionData[Records]] = Map.empty
var fetchPartitionData: Map[TopicPartition, FetchResponseData.PartitionData] = Map.empty
private val sourceNode = new Node(sourceBroker.id, sourceBroker.host, sourceBroker.port)
def setEpochRequestCallback(postEpochFunction: () => Unit): Unit = {
@ -63,7 +61,7 @@ class ReplicaFetcherMockBlockingSend(offsets: java.util.Map[TopicPartition, Epoc
currentOffsets = newOffsets
}
def setFetchPartitionDataForNextResponse(partitionData: Map[TopicPartition, FetchResponse.PartitionData[Records]]): Unit = {
def setFetchPartitionDataForNextResponse(partitionData: Map[TopicPartition, FetchResponseData.PartitionData]): Unit = {
fetchPartitionData = partitionData
}
@ -97,11 +95,11 @@ class ReplicaFetcherMockBlockingSend(offsets: java.util.Map[TopicPartition, Epoc
case ApiKeys.FETCH =>
fetchCount += 1
val partitionData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData[Records]]
val partitionData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
fetchPartitionData.foreach { case (tp, data) => partitionData.put(tp, data) }
fetchPartitionData = Map.empty
new FetchResponse(Errors.NONE, partitionData, 0,
if (partitionData.isEmpty) JFetchMetadata.INVALID_SESSION_ID else 1)
FetchResponse.of(Errors.NONE, 0,
if (partitionData.isEmpty) JFetchMetadata.INVALID_SESSION_ID else 1, partitionData)
case _ =>
throw new UnsupportedOperationException


@ -18,6 +18,7 @@
package org.apache.kafka.jmh.common;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
@ -42,9 +43,7 @@ import org.openjdk.jmh.annotations.Warmup;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@ -61,11 +60,11 @@ public class FetchResponseBenchmark {
@Param({"3", "10", "20"})
private int partitionCount;
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData;
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData;
ResponseHeader header;
FetchResponse<MemoryRecords> fetchResponse;
FetchResponse fetchResponse;
@Setup(Level.Trial)
public void setup() {
@ -78,19 +77,22 @@ public class FetchResponseBenchmark {
for (int topicIdx = 0; topicIdx < topicCount; topicIdx++) {
String topic = UUID.randomUUID().toString();
for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
FetchResponse.PartitionData<MemoryRecords> partitionData = new FetchResponse.PartitionData<>(
Errors.NONE, 0, 0, 0, Optional.empty(), Collections.emptyList(), records);
FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
.setPartitionIndex(partitionId)
.setLastStableOffset(0)
.setLogStartOffset(0)
.setRecords(records);
responseData.put(new TopicPartition(topic, partitionId), partitionData);
}
}
this.header = new ResponseHeader(100, ApiKeys.FETCH.responseHeaderVersion(ApiKeys.FETCH.latestVersion()));
this.fetchResponse = new FetchResponse<>(Errors.NONE, responseData, 0, 0);
this.fetchResponse = FetchResponse.of(Errors.NONE, 0, 0, responseData);
}
@Benchmark
public int testConstructFetchResponse() {
FetchResponse<MemoryRecords> fetchResponse = new FetchResponse<>(Errors.NONE, responseData, 0, 0);
FetchResponse fetchResponse = FetchResponse.of(Errors.NONE, 0, 0, responseData);
return fetchResponse.responseData().size();
}


@ -44,13 +44,13 @@ import kafka.server.metadata.CachedConfigRepository;
import kafka.utils.KafkaScheduler;
import kafka.utils.Pool;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.message.LeaderAndIsrRequestData;
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.Records;
import org.apache.kafka.common.record.RecordsSend;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
@ -82,7 +82,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
@ -137,7 +136,7 @@ public class ReplicaFetcherThreadBenchmark {
Time.SYSTEM,
true);
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<BaseRecords>> initialFetched = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> initialFetched = new LinkedHashMap<>();
scala.collection.mutable.Map<TopicPartition, InitialFetchState> initialFetchStates = new scala.collection.mutable.HashMap<>();
for (int i = 0; i < partitionCount; i++) {
TopicPartition tp = new TopicPartition("topic", i);
@ -174,8 +173,11 @@ public class ReplicaFetcherThreadBenchmark {
return null;
}
};
initialFetched.put(tp, new FetchResponse.PartitionData<>(Errors.NONE, 0, 0, 0,
new LinkedList<>(), fetched));
initialFetched.put(tp, new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setLastStableOffset(0)
.setLogStartOffset(0)
.setRecords(fetched));
}
ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class);
@ -186,7 +188,7 @@ public class ReplicaFetcherThreadBenchmark {
// so that we do not measure this time as part of the steady state work
fetcher.doWork();
// handle response to engage the incremental fetch session handler
fetcher.fetchSessionHandler().handleResponse(new FetchResponse<>(Errors.NONE, initialFetched, 0, 999));
fetcher.fetchSessionHandler().handleResponse(FetchResponse.of(Errors.NONE, 0, 999, initialFetched));
}
@TearDown(Level.Trial)
@ -292,7 +294,8 @@ public class ReplicaFetcherThreadBenchmark {
}
@Override
public Option<LogAppendInfo> processPartitionData(TopicPartition topicPartition, long fetchOffset, FetchResponse.PartitionData partitionData) {
public Option<LogAppendInfo> processPartitionData(TopicPartition topicPartition, long fetchOffset,
FetchResponseData.PartitionData partitionData) {
return Option.empty();
}
@ -317,7 +320,7 @@ public class ReplicaFetcherThreadBenchmark {
}
@Override
public Map<TopicPartition, FetchResponse.PartitionData<Records>> fetchFromLeader(FetchRequest.Builder fetchRequest) {
public Map<TopicPartition, FetchResponseData.PartitionData> fetchFromLeader(FetchRequest.Builder fetchRequest) {
return new scala.collection.mutable.HashMap<>();
}
}


@ -19,8 +19,8 @@ package org.apache.kafka.jmh.fetchsession;
import org.apache.kafka.clients.FetchSessionHandler;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.LogContext;
@ -70,24 +70,21 @@ public class FetchSessionBenchmark {
handler = new FetchSessionHandler(LOG_CONTEXT, 1);
FetchSessionHandler.Builder builder = handler.newBuilder();
LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> respMap = new LinkedHashMap<>();
LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> respMap = new LinkedHashMap<>();
for (int i = 0; i < partitionCount; i++) {
TopicPartition tp = new TopicPartition("foo", i);
FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(0, 0, 200,
Optional.empty());
fetches.put(tp, partitionData);
builder.add(tp, partitionData);
respMap.put(tp, new FetchResponse.PartitionData<>(
Errors.NONE,
0L,
0L,
0,
null,
null));
respMap.put(tp, new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition())
.setLastStableOffset(0)
.setLogStartOffset(0));
}
builder.build();
// build and handle an initial response so that the next fetch will be incremental
handler.handleResponse(new FetchResponse<>(Errors.NONE, respMap, 0, 1));
handler.handleResponse(FetchResponse.of(Errors.NONE, 0, 1, respMap));
int counter = 0;
for (TopicPartition topicPartition: new ArrayList<>(fetches.keySet())) {


@ -40,6 +40,7 @@ import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
@ -906,7 +907,7 @@ public class KafkaRaftClient<T> implements RaftClient<T> {
) {
return RaftUtil.singletonFetchResponse(log.topicPartition(), Errors.NONE, partitionData -> {
partitionData
.setRecordSet(records)
.setRecords(records)
.setErrorCode(error.code())
.setLogStartOffset(log.startOffset())
.setHighWatermark(highWatermark
@ -991,11 +992,11 @@ public class KafkaRaftClient<T> implements RaftClient<T> {
}
FetchResponseData response = tryCompleteFetchRequest(request.replicaId(), fetchPartition, currentTimeMs);
FetchResponseData.FetchablePartitionResponse partitionResponse =
response.responses().get(0).partitionResponses().get(0);
FetchResponseData.PartitionData partitionResponse =
response.responses().get(0).partitions().get(0);
if (partitionResponse.errorCode() != Errors.NONE.code()
|| partitionResponse.recordSet().sizeInBytes() > 0
|| FetchResponse.recordsSize(partitionResponse) > 0
|| request.maxWaitMs() == 0) {
return completedFuture(response);
}
@ -1084,8 +1085,8 @@ public class KafkaRaftClient<T> implements RaftClient<T> {
return false;
}
FetchResponseData.FetchablePartitionResponse partitionResponse =
response.responses().get(0).partitionResponses().get(0);
FetchResponseData.PartitionData partitionResponse =
response.responses().get(0).partitions().get(0);
FetchResponseData.LeaderIdAndEpoch currentLeaderIdAndEpoch = partitionResponse.currentLeader();
OptionalInt responseLeaderId = optionalLeaderId(currentLeaderIdAndEpoch.leaderId());
@ -1143,7 +1144,7 @@ public class KafkaRaftClient<T> implements RaftClient<T> {
state.setFetchingSnapshot(Optional.of(log.createSnapshot(snapshotId)));
}
} else {
Records records = (Records) partitionResponse.recordSet();
Records records = FetchResponse.recordsOrFail(partitionResponse);
if (records.sizeInBytes() > 0) {
appendAsFollower(records);
}


@ -73,19 +73,19 @@ public class RaftUtil {
public static FetchResponseData singletonFetchResponse(
TopicPartition topicPartition,
Errors topLevelError,
Consumer<FetchResponseData.FetchablePartitionResponse> partitionConsumer
Consumer<FetchResponseData.PartitionData> partitionConsumer
) {
FetchResponseData.FetchablePartitionResponse fetchablePartition =
new FetchResponseData.FetchablePartitionResponse();
FetchResponseData.PartitionData fetchablePartition =
new FetchResponseData.PartitionData();
fetchablePartition.setPartition(topicPartition.partition());
fetchablePartition.setPartitionIndex(topicPartition.partition());
partitionConsumer.accept(fetchablePartition);
FetchResponseData.FetchableTopicResponse fetchableTopic =
new FetchResponseData.FetchableTopicResponse()
.setTopic(topicPartition.topic())
.setPartitionResponses(Collections.singletonList(fetchablePartition));
.setPartitions(Collections.singletonList(fetchablePartition));
return new FetchResponseData()
.setErrorCode(topLevelError.code())
@ -102,8 +102,8 @@ public class RaftUtil {
static boolean hasValidTopicPartition(FetchResponseData data, TopicPartition topicPartition) {
return data.responses().size() == 1 &&
data.responses().get(0).topic().equals(topicPartition.topic()) &&
data.responses().get(0).partitionResponses().size() == 1 &&
data.responses().get(0).partitionResponses().get(0).partition() == topicPartition.partition();
data.responses().get(0).partitions().size() == 1 &&
data.responses().get(0).partitions().get(0).partitionIndex() == topicPartition.partition();
}
static boolean hasValidTopicPartition(VoteResponseData data, TopicPartition topicPartition) {
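For reference, the renamed RaftUtil helper is driven the same way as before, only through the new field and collection names (setRecords for setRecordSet, partitions() for partitionResponses(), partitionIndex() for partition()). A short sketch, not part of the diff, with topicPartition, records and highWatermark standing in for values a caller would already have:

    FetchResponseData response = RaftUtil.singletonFetchResponse(topicPartition, Errors.NONE, partitionData ->
        partitionData
            .setRecords(records)
            .setErrorCode(Errors.NONE.code())
            .setHighWatermark(highWatermark));

    // the accessors below are the ones hasValidTopicPartition checks above
    FetchResponseData.PartitionData sent = response.responses().get(0).partitions().get(0);
    assert sent.partitionIndex() == topicPartition.partition();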


@ -89,7 +89,7 @@ final public class KafkaRaftClientSnapshotTest {
// Send a Fetch request with an offset below the log start offset
context.deliverRequest(context.fetchRequest(epoch, otherNodeId, 0, epoch, 0));
context.pollUntilResponse();
FetchResponseData.FetchablePartitionResponse partitionResponse = context.assertSentFetchPartitionResponse();
FetchResponseData.PartitionData partitionResponse = context.assertSentFetchPartitionResponse();
assertEquals(Errors.NONE, Errors.forCode(partitionResponse.errorCode()));
assertEquals(epoch, partitionResponse.currentLeader().leaderEpoch());
assertEquals(localId, partitionResponse.currentLeader().leaderId());
@ -176,7 +176,7 @@ final public class KafkaRaftClientSnapshotTest {
context.fetchRequest(epoch, otherNodeId, oldestSnapshotId.offset + 1, oldestSnapshotId.epoch + 1, 0)
);
context.pollUntilResponse();
FetchResponseData.FetchablePartitionResponse partitionResponse = context.assertSentFetchPartitionResponse();
FetchResponseData.PartitionData partitionResponse = context.assertSentFetchPartitionResponse();
assertEquals(Errors.NONE, Errors.forCode(partitionResponse.errorCode()));
assertEquals(epoch, partitionResponse.currentLeader().leaderEpoch());
assertEquals(localId, partitionResponse.currentLeader().leaderId());
@ -265,7 +265,7 @@ final public class KafkaRaftClientSnapshotTest {
context.fetchRequest(epoch, otherNodeId, oldestSnapshotId.offset, oldestSnapshotId.epoch + 1, 0)
);
context.pollUntilResponse();
FetchResponseData.FetchablePartitionResponse partitionResponse = context.assertSentFetchPartitionResponse();
FetchResponseData.PartitionData partitionResponse = context.assertSentFetchPartitionResponse();
assertEquals(Errors.NONE, Errors.forCode(partitionResponse.errorCode()));
assertEquals(epoch, partitionResponse.currentLeader().leaderEpoch());
assertEquals(localId, partitionResponse.currentLeader().leaderId());
@ -318,7 +318,7 @@ final public class KafkaRaftClientSnapshotTest {
)
);
context.pollUntilResponse();
FetchResponseData.FetchablePartitionResponse partitionResponse = context.assertSentFetchPartitionResponse();
FetchResponseData.PartitionData partitionResponse = context.assertSentFetchPartitionResponse();
assertEquals(Errors.NONE, Errors.forCode(partitionResponse.errorCode()));
assertEquals(epoch, partitionResponse.currentLeader().leaderEpoch());
assertEquals(localId, partitionResponse.currentLeader().leaderId());
@ -1329,9 +1329,7 @@ final public class KafkaRaftClientSnapshotTest {
long highWatermark
) {
return RaftUtil.singletonFetchResponse(topicPartition, Errors.NONE, partitionData -> {
partitionData
.setErrorCode(Errors.NONE.code())
.setHighWatermark(highWatermark);
partitionData.setHighWatermark(highWatermark);
partitionData.currentLeader()
.setLeaderEpoch(epoch)


@ -606,7 +606,7 @@ public final class RaftClientTestContext {
return raftMessage.correlationId();
}
FetchResponseData.FetchablePartitionResponse assertSentFetchPartitionResponse() {
FetchResponseData.PartitionData assertSentFetchPartitionResponse() {
List<RaftResponse.Outbound> sentMessages = drainSentResponses(ApiKeys.FETCH);
assertEquals(
1, sentMessages.size(), "Found unexpected sent messages " + sentMessages);
@ -617,8 +617,8 @@ public final class RaftClientTestContext {
assertEquals(1, response.responses().size());
assertEquals(metadataPartition.topic(), response.responses().get(0).topic());
assertEquals(1, response.responses().get(0).partitionResponses().size());
return response.responses().get(0).partitionResponses().get(0);
assertEquals(1, response.responses().get(0).partitions().size());
return response.responses().get(0).partitions().get(0);
}
void assertSentFetchPartitionResponse(Errors error) {
@ -637,7 +637,7 @@ public final class RaftClientTestContext {
int epoch,
OptionalInt leaderId
) {
FetchResponseData.FetchablePartitionResponse partitionResponse = assertSentFetchPartitionResponse();
FetchResponseData.PartitionData partitionResponse = assertSentFetchPartitionResponse();
assertEquals(error, Errors.forCode(partitionResponse.errorCode()));
assertEquals(epoch, partitionResponse.currentLeader().leaderEpoch());
assertEquals(leaderId.orElse(-1), partitionResponse.currentLeader().leaderId());
@ -645,14 +645,14 @@ public final class RaftClientTestContext {
assertEquals(-1, partitionResponse.divergingEpoch().epoch());
assertEquals(-1, partitionResponse.snapshotId().endOffset());
assertEquals(-1, partitionResponse.snapshotId().epoch());
return (MemoryRecords) partitionResponse.recordSet();
return (MemoryRecords) partitionResponse.records();
}
MemoryRecords assertSentFetchPartitionResponse(
long highWatermark,
int leaderEpoch
) {
FetchResponseData.FetchablePartitionResponse partitionResponse = assertSentFetchPartitionResponse();
FetchResponseData.PartitionData partitionResponse = assertSentFetchPartitionResponse();
assertEquals(Errors.NONE, Errors.forCode(partitionResponse.errorCode()));
assertEquals(leaderEpoch, partitionResponse.currentLeader().leaderEpoch());
assertEquals(highWatermark, partitionResponse.highWatermark());
@ -660,7 +660,7 @@ public final class RaftClientTestContext {
assertEquals(-1, partitionResponse.divergingEpoch().epoch());
assertEquals(-1, partitionResponse.snapshotId().endOffset());
assertEquals(-1, partitionResponse.snapshotId().epoch());
return (MemoryRecords) partitionResponse.recordSet();
return (MemoryRecords) partitionResponse.records();
}
RaftRequest.Outbound assertSentFetchSnapshotRequest() {
@ -928,7 +928,7 @@ public final class RaftClientTestContext {
) {
return RaftUtil.singletonFetchResponse(metadataPartition, Errors.NONE, partitionData -> {
partitionData
.setRecordSet(records)
.setRecords(records)
.setErrorCode(error.code())
.setHighWatermark(highWatermark);
@ -946,9 +946,7 @@ public final class RaftClientTestContext {
long highWatermark
) {
return RaftUtil.singletonFetchResponse(metadataPartition, Errors.NONE, partitionData -> {
partitionData
.setErrorCode(Errors.NONE.code())
.setHighWatermark(highWatermark);
partitionData.setHighWatermark(highWatermark);
partitionData.currentLeader()
.setLeaderEpoch(epoch)