KAFKA-18465: Remove MetadataVersions older than 3.0-IV1 (#18468)

Apache Kafka 4.0 will only support KRaft, and 3.0-IV1 is the minimum metadata version supported by KRaft. So, we can assume that Apache Kafka 4.0 will only communicate with brokers that are 3.0-IV1 or newer.

Note that KRaft was only marked as production-ready in 3.3, so we could go further and set the baseline to 3.3. I think we should have that discussion, but it made sense to start with the non-controversial parts.

Reviewers: Jun Rao <junrao@gmail.com>, Chia-Ping Tsai <chia7712@gmail.com>, David Jacot <david.jacot@gmail.com>
Ismael Juma 2025-01-11 09:42:39 -08:00
parent 97fb8be251
commit 04900e761b
87 changed files with 431 additions and 2181 deletions

ClientUtils.java

@@ -118,7 +118,7 @@ public final class ClientUtils {
         SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG));
         String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
         return ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT, config, null,
-                clientSaslMechanism, time, true, logContext);
+                clientSaslMechanism, time, logContext);
     }

     static List<InetAddress> resolve(String host, HostResolver hostResolver) throws UnknownHostException {

ChannelBuilders.java

@@ -56,8 +56,6 @@ public class ChannelBuilders {
      * @param listenerName the listenerName if contextType is SERVER or null otherwise
      * @param clientSaslMechanism SASL mechanism if mode is CLIENT, ignored otherwise
      * @param time the time instance
-     * @param saslHandshakeRequestEnable flag to enable Sasl handshake requests; disabled only for SASL
-     *                                   inter-broker connections with inter-broker protocol version < 0.10
      * @param logContext the log context instance
      *
      * @return the configured `ChannelBuilder`
@@ -70,7 +68,6 @@ public class ChannelBuilders {
                                                      ListenerName listenerName,
                                                      String clientSaslMechanism,
                                                      Time time,
-                                                     boolean saslHandshakeRequestEnable,
                                                      LogContext logContext) {
         if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) {
@@ -80,7 +77,7 @@ public class ChannelBuilders {
                 throw new IllegalArgumentException("`clientSaslMechanism` must be non-null in client mode if `securityProtocol` is `" + securityProtocol + "`");
         }
         return create(securityProtocol, ConnectionMode.CLIENT, contextType, config, listenerName, false, clientSaslMechanism,
-                saslHandshakeRequestEnable, null, null, time, logContext, null);
+                null, null, time, logContext, null);
     }

     /**
@@ -106,8 +103,8 @@ public class ChannelBuilders {
                                                      LogContext logContext,
                                                      Function<Short, ApiVersionsResponse> apiVersionSupplier) {
         return create(securityProtocol, ConnectionMode.SERVER, JaasContext.Type.SERVER, config, listenerName,
-                isInterBrokerListener, null, true, credentialCache,
-                tokenCache, time, logContext, apiVersionSupplier);
+                isInterBrokerListener, null, credentialCache, tokenCache, time, logContext,
+                apiVersionSupplier);
     }

     private static ChannelBuilder create(SecurityProtocol securityProtocol,
@@ -117,7 +114,6 @@ public class ChannelBuilders {
                                          ListenerName listenerName,
                                          boolean isInterBrokerListener,
                                          String clientSaslMechanism,
-                                         boolean saslHandshakeRequestEnable,
                                          CredentialCache credentialCache,
                                          DelegationTokenCache tokenCache,
                                          Time time,
@@ -175,7 +171,6 @@ public class ChannelBuilders {
                     listenerName,
                     isInterBrokerListener,
                     clientSaslMechanism,
-                    saslHandshakeRequestEnable,
                     credentialCache,
                     tokenCache,
                     sslClientAuthOverride,
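With the saslHandshakeRequestEnable flag gone, SASL handshake requests are always enabled and client channel builders take one argument fewer. A minimal sketch of the updated call site; the config, mechanism and listener values are illustrative assumptions:

    ChannelBuilder channelBuilder = ChannelBuilders.clientChannelBuilder(
        SecurityProtocol.SASL_SSL,   // securityProtocol
        JaasContext.Type.CLIENT,     // contextType
        config,                      // an AbstractConfig holding the security settings
        null,                        // listenerName: null in client mode
        "PLAIN",                     // clientSaslMechanism
        Time.SYSTEM,
        new LogContext());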

SaslChannelBuilder.java

@@ -85,7 +85,6 @@ public class SaslChannelBuilder implements ChannelBuilder, ListenerReconfigurable {
     private final String clientSaslMechanism;
     private final ConnectionMode connectionMode;
     private final Map<String, JaasContext> jaasContexts;
-    private final boolean handshakeRequestEnable;
     private final CredentialCache credentialCache;
     private final DelegationTokenCache tokenCache;
     private final Map<String, LoginManager> loginManagers;
@@ -108,7 +107,6 @@ public class SaslChannelBuilder implements ChannelBuilder, ListenerReconfigurable {
                               ListenerName listenerName,
                               boolean isInterBrokerListener,
                               String clientSaslMechanism,
-                              boolean handshakeRequestEnable,
                               CredentialCache credentialCache,
                               DelegationTokenCache tokenCache,
                               String sslClientAuthOverride,
@@ -122,7 +120,6 @@ public class SaslChannelBuilder implements ChannelBuilder, ListenerReconfigurable {
         this.securityProtocol = securityProtocol;
         this.listenerName = listenerName;
         this.isInterBrokerListener = isInterBrokerListener;
-        this.handshakeRequestEnable = handshakeRequestEnable;
         this.clientSaslMechanism = clientSaslMechanism;
         this.credentialCache = credentialCache;
         this.tokenCache = tokenCache;
@@ -295,7 +292,7 @@ public class SaslChannelBuilder implements ChannelBuilder, ListenerReconfigurable {
                                                           String servicePrincipal,
                                                           TransportLayer transportLayer, Subject subject) {
         return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal,
-                serverHost, clientSaslMechanism, handshakeRequestEnable, transportLayer, time, logContext);
+                serverHost, clientSaslMechanism, transportLayer, time, logContext);
     }

     // Package private for testing

AlterPartitionRequest.java

@@ -68,14 +68,12 @@ public class AlterPartitionRequest extends AbstractRequest {
          * @param data The data to be sent. Note that because the version of the
          *             request is not known at this time, it is expected that all
          *             topics have a topic id and a topic name set.
-         * @param canUseTopicIds True if version 2 and above can be used.
          */
-        public Builder(AlterPartitionRequestData data, boolean canUseTopicIds) {
+        public Builder(AlterPartitionRequestData data) {
            super(
                ApiKeys.ALTER_PARTITION,
                ApiKeys.ALTER_PARTITION.oldestVersion(),
-               // Version 1 is the maximum version that can be used without topic ids.
-               canUseTopicIds ? ApiKeys.ALTER_PARTITION.latestVersion() : 1
+               ApiKeys.ALTER_PARTITION.latestVersion()
            );
            this.data = data;
        }

ApiVersionsResponse.java

@@ -31,7 +31,6 @@ import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
 import org.apache.kafka.common.protocol.ApiKeys;
 import org.apache.kafka.common.protocol.ByteBufferAccessor;
 import org.apache.kafka.common.protocol.Errors;
-import org.apache.kafka.common.record.RecordVersion;

 import java.nio.ByteBuffer;
 import java.util.Map;
@@ -172,7 +171,6 @@ public class ApiVersionsResponse extends AbstractResponse {
     }

     public static ApiVersionCollection controllerApiVersions(
-        RecordVersion minRecordVersion,
         NodeApiVersions controllerApiVersions,
         ListenerType listenerType,
         boolean enableUnstableLastVersion,
@@ -180,27 +178,23 @@ public class ApiVersionsResponse extends AbstractResponse {
     ) {
         return intersectForwardableApis(
             listenerType,
-            minRecordVersion,
             controllerApiVersions.allSupportedApiVersions(),
             enableUnstableLastVersion,
             clientTelemetryEnabled);
     }

     public static ApiVersionCollection brokerApiVersions(
-        RecordVersion minRecordVersion,
         ListenerType listenerType,
         boolean enableUnstableLastVersion,
         boolean clientTelemetryEnabled
     ) {
         return filterApis(
-            minRecordVersion,
             listenerType,
             enableUnstableLastVersion,
             clientTelemetryEnabled);
     }

     public static ApiVersionCollection filterApis(
-        RecordVersion minRecordVersion,
         ApiMessageType.ListenerType listenerType,
         boolean enableUnstableLastVersion,
         boolean clientTelemetryEnabled
@@ -210,11 +204,8 @@ public class ApiVersionsResponse extends AbstractResponse {
             // Skip telemetry APIs if client telemetry is disabled.
             if ((apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY) && !clientTelemetryEnabled)
                 continue;
-            if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
-                apiKey.toApiVersion(enableUnstableLastVersion).ifPresent(apiKeys::add);
-            }
+            apiKey.toApiVersion(enableUnstableLastVersion).ifPresent(apiKeys::add);
         }
         return apiKeys;
     }
@@ -234,7 +225,6 @@ public class ApiVersionsResponse extends AbstractResponse {
      * known range and that of another set.
      *
      * @param listenerType the listener type which constrains the set of exposed APIs
-     * @param minRecordVersion min inter broker magic
     * @param activeControllerApiVersions controller ApiVersions
      * @param enableUnstableLastVersion whether unstable versions should be advertised or not
      * @param clientTelemetryEnabled whether client telemetry is enabled or not
@@ -242,14 +232,12 @@ public class ApiVersionsResponse extends AbstractResponse {
      */
     public static ApiVersionCollection intersectForwardableApis(
         final ApiMessageType.ListenerType listenerType,
-        final RecordVersion minRecordVersion,
         final Map<ApiKeys, ApiVersion> activeControllerApiVersions,
         boolean enableUnstableLastVersion,
         boolean clientTelemetryEnabled
     ) {
         ApiVersionCollection apiKeys = new ApiVersionCollection();
         for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) {
-            if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
             final Optional<ApiVersion> brokerApiVersion = apiKey.toApiVersion(enableUnstableLastVersion);
             if (brokerApiVersion.isEmpty()) {
                 // Broker does not support this API key.
@@ -278,7 +266,6 @@ public class ApiVersionsResponse extends AbstractResponse {
                 apiKeys.add(finalApiVersion.duplicate());
             }
-        }
         return apiKeys;
     }

OffsetsForLeaderEpochRequest.java

@@ -65,11 +65,12 @@ public class OffsetsForLeaderEpochRequest extends AbstractRequest {
             return new Builder((short) 3, ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(), data);
         }

-        public static Builder forFollower(short version, OffsetForLeaderTopicCollection epochsByPartition, int replicaId) {
+        public static Builder forFollower(OffsetForLeaderTopicCollection epochsByPartition, int replicaId) {
             OffsetForLeaderEpochRequestData data = new OffsetForLeaderEpochRequestData();
             data.setReplicaId(replicaId);
             data.setTopics(epochsByPartition);
-            return new Builder(version, version, data);
+            // If we introduce new versions, we should gate them behind the appropriate metadata version
+            return new Builder((short) 4, (short) 4, data);
         }

     @Override
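The follower factory now pins version 4 instead of threading the caller's version through. A sketch of the updated usage, mirroring the test change further down; the replicaId is illustrative:

    OffsetsForLeaderEpochRequest request = OffsetsForLeaderEpochRequest.Builder
        .forFollower(new OffsetForLeaderTopicCollection(), 1)
        .build();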

WriteTxnMarkersRequest.java

@@ -110,8 +110,8 @@ public class WriteTxnMarkersRequest extends AbstractRequest {
             this.data = data;
         }

-        public Builder(short version, final List<TxnMarkerEntry> markers) {
-            super(ApiKeys.WRITE_TXN_MARKERS, version);
+        public Builder(final List<TxnMarkerEntry> markers) {
+            super(ApiKeys.WRITE_TXN_MARKERS, (short) 1); // if we add new versions, gate them behind metadata version
             List<WritableTxnMarker> dataMarkers = new ArrayList<>();
             for (TxnMarkerEntry marker : markers) {
                 final Map<String, WritableTxnMarkerTopic> topicMap = new HashMap<>();
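The builder similarly hard-codes version 1 as both floor and ceiling. A sketch with illustrative producer id, epoch and coordinator epoch values, matching the test usage below:

    List<TopicPartition> partitions = Collections.singletonList(new TopicPartition("topic", 0));
    WriteTxnMarkersRequest.TxnMarkerEntry marker =
        new WriteTxnMarkersRequest.TxnMarkerEntry(21L, (short) 42, 73, TransactionResult.ABORT, partitions);
    WriteTxnMarkersRequest request =
        new WriteTxnMarkersRequest.Builder(Collections.singletonList(marker)).build((short) 1);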

SaslClientAuthenticator.java

@@ -177,7 +177,6 @@ public class SaslClientAuthenticator implements Authenticator {
                                    String servicePrincipal,
                                    String host,
                                    String mechanism,
-                                   boolean handshakeRequestEnable,
                                    TransportLayer transportLayer,
                                    Time time,
                                    LogContext logContext) {
@@ -196,7 +195,7 @@ public class SaslClientAuthenticator implements Authenticator {
         this.reauthInfo = new ReauthInfo();

         try {
-            setSaslState(handshakeRequestEnable ? SaslState.SEND_APIVERSIONS_REQUEST : SaslState.INITIAL);
+            setSaslState(SaslState.SEND_APIVERSIONS_REQUEST);

             // determine client principal from subject for Kerberos to use as authorization id for the SaslClient.
             // For other mechanisms, the authenticated principal (username for PLAIN and SCRAM) is used as
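Every client-side authenticator now starts in SEND_APIVERSIONS_REQUEST, i.e. it always negotiates API versions before the SASL handshake. A construction sketch against the trimmed signature; the id, principal, host and mechanism values are assumptions:

    SaslClientAuthenticator authenticator = new SaslClientAuthenticator(
        configs, callbackHandler, "node-1", subject,
        "kafka",                  // servicePrincipal
        "broker.example.com",     // serverHost
        "PLAIN",                  // mechanism
        transportLayer, Time.SYSTEM, new LogContext());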

KafkaAdminClientTest.java

@@ -157,7 +157,6 @@ import org.apache.kafka.common.quota.ClientQuotaAlteration;
 import org.apache.kafka.common.quota.ClientQuotaEntity;
 import org.apache.kafka.common.quota.ClientQuotaFilter;
 import org.apache.kafka.common.quota.ClientQuotaFilterComponent;
-import org.apache.kafka.common.record.RecordVersion;
 import org.apache.kafka.common.requests.AddRaftVoterRequest;
 import org.apache.kafka.common.requests.AddRaftVoterResponse;
 import org.apache.kafka.common.requests.AlterClientQuotasResponse;
@@ -773,7 +772,7 @@ public class KafkaAdminClientTest {
         if (error == Errors.NONE) {
             return new ApiVersionsResponse.Builder().
                 setApiVersions(ApiVersionsResponse.filterApis(
-                    RecordVersion.current(), ApiMessageType.ListenerType.ZK_BROKER, false, false)).
+                    ApiMessageType.ListenerType.ZK_BROKER, false, false)).
                 setSupportedFeatures(
                     convertSupportedFeaturesMap(defaultFeatureMetadata().supportedFeatures())).
                 setFinalizedFeatures(

SaslChannelBuilderTest.java

@@ -166,7 +166,7 @@ public class SaslChannelBuilderTest {
     private SaslChannelBuilder createGssapiChannelBuilder(Map<String, JaasContext> jaasContexts, GSSManager gssManager) {
         SaslChannelBuilder channelBuilder = new SaslChannelBuilder(ConnectionMode.SERVER, jaasContexts,
                 SecurityProtocol.SASL_PLAINTEXT, new ListenerName("GSSAPI"), false, "GSSAPI",
-                true, null, null, null, Time.SYSTEM, new LogContext(), defaultApiVersionsSupplier()) {
+                null, null, null, Time.SYSTEM, new LogContext(), defaultApiVersionsSupplier()) {

             @Override
             protected GSSManager gssManager() {
@@ -205,7 +205,7 @@ public class SaslChannelBuilderTest {
         JaasContext jaasContext = new JaasContext("jaasContext", JaasContext.Type.SERVER, jaasConfig, null);
         Map<String, JaasContext> jaasContexts = Collections.singletonMap(saslMechanism, jaasContext);
         return new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, new ListenerName(saslMechanism),
-                false, saslMechanism, true, null,
+                false, saslMechanism, null,
                 null, null, Time.SYSTEM, new LogContext(), defaultApiVersionsSupplier());
     }

AlterPartitionRequestTest.java

@@ -61,7 +61,7 @@ class AlterPartitionRequestTest {
         request.topics().add(topicData);

-        AlterPartitionRequest.Builder builder = new AlterPartitionRequest.Builder(request, version > 1);
+        AlterPartitionRequest.Builder builder = new AlterPartitionRequest.Builder(request);
         AlterPartitionRequest alterPartitionRequest = builder.build(version);
         assertEquals(1, alterPartitionRequest.data().topics().size());
         assertEquals(1, alterPartitionRequest.data().topics().get(0).partitions().size());

ApiVersionsResponseTest.java

@@ -23,11 +23,8 @@ import org.apache.kafka.common.message.ApiMessageType;
 import org.apache.kafka.common.message.ApiMessageType.ListenerType;
 import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
 import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection;
-import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey;
 import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
 import org.apache.kafka.common.protocol.ApiKeys;
-import org.apache.kafka.common.record.RecordBatch;
-import org.apache.kafka.common.record.RecordVersion;
 import org.apache.kafka.common.utils.Utils;
 import org.apache.kafka.test.TestUtils;
@@ -103,7 +100,6 @@ public class ApiVersionsResponseTest {
         ApiVersionCollection commonResponse = ApiVersionsResponse.intersectForwardableApis(
             ApiMessageType.ListenerType.ZK_BROKER,
-            RecordVersion.current(),
             activeControllerApiVersions,
             true,
             false
@@ -115,63 +111,12 @@ public class ApiVersionsResponseTest {
             ApiKeys.JOIN_GROUP.latestVersion(), commonResponse);
     }

-    @Test
-    public void shouldCreateApiResponseOnlyWithKeysSupportedByMagicValue() {
-        ApiVersionsResponse response = new ApiVersionsResponse.Builder().
-            setThrottleTimeMs(10).
-            setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.V1,
-                ListenerType.ZK_BROKER,
-                true,
-                true)).
-            setSupportedFeatures(Features.emptySupportedFeatures()).
-            setFinalizedFeatures(Collections.emptyMap()).
-            setFinalizedFeaturesEpoch(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH).
-            build();
-        verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1);
-        assertEquals(10, response.throttleTimeMs());
-        assertTrue(response.data().supportedFeatures().isEmpty());
-        assertTrue(response.data().finalizedFeatures().isEmpty());
-        assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch());
-    }
-
-    @Test
-    public void shouldReturnFeatureKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() {
-        ApiVersionsResponse response = new ApiVersionsResponse.Builder().
-            setThrottleTimeMs(10).
-            setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.V1,
-                ListenerType.ZK_BROKER,
-                true,
-                true)).
-            setSupportedFeatures(Features.supportedFeatures(
-                Utils.mkMap(Utils.mkEntry("feature", new SupportedVersionRange((short) 1, (short) 4))))).
-            setFinalizedFeatures(Utils.mkMap(Utils.mkEntry("feature", (short) 3))).
-            setFinalizedFeaturesEpoch(10L).
-            build();
-        verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1);
-        assertEquals(10, response.throttleTimeMs());
-        assertEquals(1, response.data().supportedFeatures().size());
-        SupportedFeatureKey sKey = response.data().supportedFeatures().find("feature");
-        assertNotNull(sKey);
-        assertEquals(1, sKey.minVersion());
-        assertEquals(4, sKey.maxVersion());
-        assertEquals(1, response.data().finalizedFeatures().size());
-        FinalizedFeatureKey fKey = response.data().finalizedFeatures().find("feature");
-        assertNotNull(fKey);
-        assertEquals(3, fKey.minVersionLevel());
-        assertEquals(3, fKey.maxVersionLevel());
-        assertEquals(10, response.data().finalizedFeaturesEpoch());
-    }
-
     @ParameterizedTest
     @EnumSource(names = {"ZK_BROKER", "BROKER"})
-    public void shouldReturnAllKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle(ListenerType listenerType) {
+    public void shouldReturnAllKeysWhenThrottleMsIsDefaultThrottle(ListenerType listenerType) {
         ApiVersionsResponse response = new ApiVersionsResponse.Builder().
             setThrottleTimeMs(AbstractResponse.DEFAULT_THROTTLE_TIME).
             setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.current(),
                 listenerType,
                 true,
                 true)).
@@ -191,7 +136,6 @@ public class ApiVersionsResponseTest {
         ApiVersionsResponse response = new ApiVersionsResponse.Builder().
             setThrottleTimeMs(10).
             setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.V1,
                 ListenerType.BROKER,
                 true,
                 true)).
@@ -207,7 +151,6 @@ public class ApiVersionsResponseTest {
         ApiVersionsResponse response = new ApiVersionsResponse.Builder().
             setThrottleTimeMs(10).
             setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.V1,
                 ListenerType.BROKER,
                 true,
                 false)).
@@ -223,7 +166,6 @@ public class ApiVersionsResponseTest {
         ApiVersionsResponse response = new ApiVersionsResponse.Builder().
             setThrottleTimeMs(AbstractResponse.DEFAULT_THROTTLE_TIME).
             setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.current(),
                 ListenerType.ZK_BROKER,
                 true,
                 true)).
@@ -278,7 +220,6 @@ public class ApiVersionsResponseTest {
             new SupportedVersionRange((short) 0, (short) 1)));
         ApiVersionsResponse response = new ApiVersionsResponse.Builder().
             setApiVersions(ApiVersionsResponse.filterApis(
-                RecordVersion.current(),
                 ListenerType.BROKER,
                 true,
                 true)).

OffsetsForLeaderEpochRequestTest.java

@@ -42,18 +42,13 @@ public class OffsetsForLeaderEpochRequestTest {
     }

     @Test
-    public void testDefaultReplicaId() {
-        for (short version : ApiKeys.OFFSET_FOR_LEADER_EPOCH.allVersions()) {
-            int replicaId = 1;
-            OffsetsForLeaderEpochRequest.Builder builder = OffsetsForLeaderEpochRequest.Builder.forFollower(
-                version, new OffsetForLeaderTopicCollection(), replicaId);
-            OffsetsForLeaderEpochRequest request = builder.build();
-            OffsetsForLeaderEpochRequest parsed = OffsetsForLeaderEpochRequest.parse(request.serialize(), version);
-            if (version < 3)
-                assertEquals(OffsetsForLeaderEpochRequest.DEBUGGING_REPLICA_ID, parsed.replicaId());
-            else
-                assertEquals(replicaId, parsed.replicaId());
-        }
+    public void testForFollower() {
+        short version = 4;
+        int replicaId = 1;
+        OffsetsForLeaderEpochRequest.Builder builder = OffsetsForLeaderEpochRequest.Builder.forFollower(
+            new OffsetForLeaderTopicCollection(), replicaId);
+        OffsetsForLeaderEpochRequest request = builder.build();
+        OffsetsForLeaderEpochRequest parsed = OffsetsForLeaderEpochRequest.parse(request.serialize(), version);
+        assertEquals(replicaId, parsed.replicaId());
     }
 }

RequestResponseTest.java

@@ -1003,7 +1003,7 @@ public class RequestResponseTest {
             case DELETE_TOPICS: return createDeleteTopicsRequest(version);
             case DELETE_RECORDS: return createDeleteRecordsRequest(version);
             case INIT_PRODUCER_ID: return createInitPidRequest(version);
-            case OFFSET_FOR_LEADER_EPOCH: return createLeaderEpochRequestForReplica(version, 1);
+            case OFFSET_FOR_LEADER_EPOCH: return createLeaderEpochRequestForReplica(1);
             case ADD_PARTITIONS_TO_TXN: return createAddPartitionsToTxnRequest(version);
             case ADD_OFFSETS_TO_TXN: return createAddOffsetsToTxnRequest(version);
             case END_TXN: return createEndTxnRequest(version);
@@ -1739,7 +1739,7 @@ public class RequestResponseTest {
                 .setTopicName("topic1")
                 .setTopicId(Uuid.randomUuid())
                 .setPartitions(singletonList(partitionData))));
-        return new AlterPartitionRequest.Builder(data, version >= 1).build(version);
+        return new AlterPartitionRequest.Builder(data).build(version);
     }

     private AlterPartitionResponse createAlterPartitionResponse(int version) {
@@ -2897,9 +2897,9 @@ public class RequestResponseTest {
         return OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs).build();
     }

-    private OffsetsForLeaderEpochRequest createLeaderEpochRequestForReplica(short version, int replicaId) {
+    private OffsetsForLeaderEpochRequest createLeaderEpochRequestForReplica(int replicaId) {
         OffsetForLeaderTopicCollection epochs = createOffsetForLeaderTopicCollection();
-        return OffsetsForLeaderEpochRequest.Builder.forFollower(version, epochs, replicaId).build();
+        return OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, replicaId).build();
     }

     private OffsetsForLeaderEpochResponse createLeaderEpochResponse() {
@@ -3002,7 +3002,7 @@ public class RequestResponseTest {
     private WriteTxnMarkersRequest createWriteTxnMarkersRequest(short version) {
         List<TopicPartition> partitions = singletonList(new TopicPartition("topic", 73));
         WriteTxnMarkersRequest.TxnMarkerEntry txnMarkerEntry = new WriteTxnMarkersRequest.TxnMarkerEntry(21L, (short) 42, 73, TransactionResult.ABORT, partitions);
-        return new WriteTxnMarkersRequest.Builder(WRITE_TXN_MARKERS.latestVersion(), singletonList(txnMarkerEntry)).build(version);
+        return new WriteTxnMarkersRequest.Builder(singletonList(txnMarkerEntry)).build(version);
     }

     private WriteTxnMarkersResponse createWriteTxnMarkersResponse() {

WriteTxnMarkersRequestTest.java

@@ -51,7 +51,7 @@ public class WriteTxnMarkersRequestTest {
     @Test
     public void testConstructor() {
-        WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), markers);
+        WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(markers);
         for (short version : ApiKeys.WRITE_TXN_MARKERS.allVersions()) {
             WriteTxnMarkersRequest request = builder.build(version);
             assertEquals(1, request.markers().size());
@@ -66,7 +66,7 @@ public class WriteTxnMarkersRequestTest {
     @Test
     public void testGetErrorResponse() {
-        WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), markers);
+        WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(markers);
         for (short version : ApiKeys.WRITE_TXN_MARKERS.allVersions()) {
             WriteTxnMarkersRequest request = builder.build(version);
             WriteTxnMarkersResponse errorResponse =

SaslAuthenticatorFailureDelayTest.java

@@ -201,8 +201,7 @@ public abstract class SaslAuthenticatorFailureDelayTest {
         String saslMechanism = (String) saslClientConfigs.get(SaslConfigs.SASL_MECHANISM);
         ChannelBuilder channelBuilder = ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT,
-                new TestSecurityConfig(clientConfigs), null, saslMechanism, time, true,
-                new LogContext());
+                new TestSecurityConfig(clientConfigs), null, saslMechanism, time, new LogContext());
         this.selector = NetworkTestUtils.createSelector(channelBuilder, time);
     }

SaslAuthenticatorTest.java

@@ -1617,7 +1617,6 @@ public class SaslAuthenticatorTest {
                 null,
                 null,
                 "plain",
-                false,
                 null,
                 null,
                 new LogContext()
@@ -1673,7 +1672,7 @@ public class SaslAuthenticatorTest {
         Map<String, ?> configs = new TestSecurityConfig(saslClientConfigs).values();
         this.channelBuilder = new AlternateSaslChannelBuilder(ConnectionMode.CLIENT,
                 Collections.singletonMap(saslMechanism, JaasContext.loadClientContext(configs)), securityProtocol, null,
-                false, saslMechanism, true, credentialCache, null, time);
+                false, saslMechanism, credentialCache, null, time);
         this.channelBuilder.configure(configs);
         // initial authentication must succeed
         this.selector = NetworkTestUtils.createSelector(channelBuilder, time);
@@ -1958,7 +1957,7 @@ public class SaslAuthenticatorTest {
         };
         SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(ConnectionMode.SERVER, jaasContexts,
-                securityProtocol, listenerName, false, saslMechanism, true,
+                securityProtocol, listenerName, false, saslMechanism,
                 credentialCache, null, null, time, new LogContext(), apiVersionSupplier);
         serverChannelBuilder.configure(saslServerConfigs);
@@ -1999,7 +1998,7 @@ public class SaslAuthenticatorTest {
         };
         SaslChannelBuilder serverChannelBuilder = new SaslChannelBuilder(ConnectionMode.SERVER, jaasContexts,
-                securityProtocol, listenerName, false, saslMechanism, true,
+                securityProtocol, listenerName, false, saslMechanism,
                 credentialCache, null, null, time, new LogContext(), apiVersionSupplier) {
             @Override
             protected SaslServerAuthenticator buildServerAuthenticator(Map<String, ?> configs,
@@ -2034,7 +2033,7 @@ public class SaslAuthenticatorTest {
         final Map<String, JaasContext> jaasContexts = Collections.singletonMap(saslMechanism, jaasContext);
         SaslChannelBuilder clientChannelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts,
-                securityProtocol, listenerName, false, saslMechanism, true,
+                securityProtocol, listenerName, false, saslMechanism,
                 null, null, null, time, new LogContext(), null) {
             @Override
@@ -2047,7 +2046,7 @@ public class SaslAuthenticatorTest {
                     Subject subject) {
                 return new SaslClientAuthenticator(configs, callbackHandler, id, subject,
-                        servicePrincipal, serverHost, saslMechanism, true,
+                        servicePrincipal, serverHost, saslMechanism,
                         transportLayer, time, new LogContext()) {
                     @Override
                     protected SaslHandshakeRequest createSaslHandshakeRequest(short version) {
@@ -2167,8 +2166,7 @@ public class SaslAuthenticatorTest {
         String saslMechanism = (String) saslClientConfigs.get(SaslConfigs.SASL_MECHANISM);
         this.channelBuilder = ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT,
-                new TestSecurityConfig(clientConfigs), null, saslMechanism, time,
-                true, new LogContext());
+                new TestSecurityConfig(clientConfigs), null, saslMechanism, time, new LogContext());
         this.selector = NetworkTestUtils.createSelector(channelBuilder, time);
     }
@@ -2572,10 +2570,10 @@ public class SaslAuthenticatorTest {
         public AlternateSaslChannelBuilder(ConnectionMode connectionMode, Map<String, JaasContext> jaasContexts,
                                            SecurityProtocol securityProtocol, ListenerName listenerName, boolean isInterBrokerListener,
-                                           String clientSaslMechanism, boolean handshakeRequestEnable, CredentialCache credentialCache,
+                                           String clientSaslMechanism, CredentialCache credentialCache,
                                            DelegationTokenCache tokenCache, Time time) {
             super(connectionMode, jaasContexts, securityProtocol, listenerName, isInterBrokerListener, clientSaslMechanism,
-                  handshakeRequestEnable, credentialCache, tokenCache, null, time, new LogContext(),
+                  credentialCache, tokenCache, null, time, new LogContext(),
                   version -> TestUtils.defaultApiVersionsResponse(ApiMessageType.ListenerType.ZK_BROKER));
         }
@@ -2585,10 +2583,10 @@ public class SaslAuthenticatorTest {
                                                           TransportLayer transportLayer, Subject subject) {
             if (++numInvocations == 1)
                 return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal, serverHost,
-                        "DIGEST-MD5", true, transportLayer, time, new LogContext());
+                        "DIGEST-MD5", transportLayer, time, new LogContext());
             else
                 return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal, serverHost,
-                        "PLAIN", true, transportLayer, time, new LogContext()) {
+                        "PLAIN", transportLayer, time, new LogContext()) {
                     @Override
                     protected SaslHandshakeRequest createSaslHandshakeRequest(short version) {
                         return new SaslHandshakeRequest.Builder(

TestUtils.java

@@ -31,7 +31,6 @@ import org.apache.kafka.common.network.NetworkReceive;
 import org.apache.kafka.common.network.Send;
 import org.apache.kafka.common.protocol.ApiKeys;
 import org.apache.kafka.common.protocol.Errors;
-import org.apache.kafka.common.record.RecordVersion;
 import org.apache.kafka.common.record.UnalignedRecords;
 import org.apache.kafka.common.requests.ApiVersionsResponse;
 import org.apache.kafka.common.requests.ByteBufferChannel;
@@ -677,7 +676,7 @@ public class TestUtils {
     ) {
         return createApiVersionsResponse(
             throttleTimeMs,
-            ApiVersionsResponse.filterApis(RecordVersion.current(), listenerType, true, true),
+            ApiVersionsResponse.filterApis(listenerType, true, true),
             Features.emptySupportedFeatures(),
             false
         );
@@ -690,7 +689,7 @@ public class TestUtils {
     ) {
         return createApiVersionsResponse(
             throttleTimeMs,
-            ApiVersionsResponse.filterApis(RecordVersion.current(), listenerType, enableUnstableLastVersion, true),
+            ApiVersionsResponse.filterApis(listenerType, enableUnstableLastVersion, true),
             Features.emptySupportedFeatures(),
             false
         );

NetworkUtils.java

@@ -47,7 +47,6 @@ public class NetworkUtils {
                 config.interBrokerListenerName(),
                 config.saslMechanismInterBrokerProtocol(),
                 time,
-                config.saslInterBrokerHandshakeRequestEnable(),
                 logContext
         );

Partition.scala

@@ -41,7 +41,7 @@ import org.apache.kafka.common.requests._
 import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
 import org.apache.kafka.common.utils.Time
 import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState}
-import org.apache.kafka.server.common.{MetadataVersion, RequestLocal}
+import org.apache.kafka.server.common.RequestLocal
 import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogReadInfo, LogStartOffsetIncrementReason, OffsetResultHolder, VerificationGuard}
 import org.apache.kafka.server.metrics.KafkaMetricsGroup
 import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey}
@@ -154,7 +154,6 @@ object Partition {
     new Partition(topicPartition,
       _topicId = topicId,
       replicaLagTimeMaxMs = replicaManager.config.replicaLagTimeMaxMs,
-      interBrokerProtocolVersion = replicaManager.metadataCache.metadataVersion(),
       localBrokerId = replicaManager.config.brokerId,
       localBrokerEpochSupplier = replicaManager.brokerEpochSupplier,
       time = time,
@@ -307,7 +306,6 @@ case class CommittedPartitionState(
  */
 class Partition(val topicPartition: TopicPartition,
                 val replicaLagTimeMaxMs: Long,
-                interBrokerProtocolVersion: MetadataVersion,
                 localBrokerId: Int,
                 localBrokerEpochSupplier: () => Long,
                 time: Time,
@@ -1403,7 +1401,7 @@ class Partition(val topicPartition: TopicPartition,
         }

         val info = leaderLog.appendAsLeader(records, leaderEpoch = this.leaderEpoch, origin,
-          interBrokerProtocolVersion, requestLocal, verificationGuard)
+          requestLocal, verificationGuard)

         // we may need to increment high watermark since ISR could be down to 1
         (info, maybeIncrementLeaderHW(leaderLog))

ControllerChannelManager.scala

@@ -128,7 +128,6 @@ class ControllerChannelManager(controllerEpoch: () => Int,
       controllerToBrokerListenerName,
       config.saslMechanismInterBrokerProtocol,
       time,
-      config.saslInterBrokerHandshakeRequestEnable,
       logContext
     )
     val reconfigurableChannelBuilder = channelBuilder match {
@@ -516,12 +515,7 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig,
     val leaderAndIsrRequestVersion: Short =
       if (metadataVersion.isAtLeast(IBP_3_4_IV0)) 7
       else if (metadataVersion.isAtLeast(IBP_3_2_IV0)) 6
-      else if (metadataVersion.isAtLeast(IBP_2_8_IV1)) 5
-      else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 4
-      else if (metadataVersion.isAtLeast(IBP_2_4_IV0)) 3
-      else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 2
-      else if (metadataVersion.isAtLeast(IBP_1_0_IV0)) 1
-      else 0
+      else 5

     leaderAndIsrRequestMap.foreachEntry { (broker, leaderAndIsrPartitionStates) =>
       if (metadataInstance.liveOrShuttingDownBrokerIds.contains(broker)) {
@@ -579,14 +573,7 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig,
     val metadataVersion = metadataVersionProvider.apply()
     val updateMetadataRequestVersion: Short =
       if (metadataVersion.isAtLeast(IBP_3_4_IV0)) 8
-      else if (metadataVersion.isAtLeast(IBP_2_8_IV1)) 7
-      else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 6
-      else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 5
-      else if (metadataVersion.isAtLeast(IBP_1_0_IV0)) 4
-      else if (metadataVersion.isAtLeast(IBP_0_10_2_IV0)) 3
-      else if (metadataVersion.isAtLeast(IBP_0_10_0_IV1)) 2
-      else if (metadataVersion.isAtLeast(IBP_0_9_0)) 1
-      else 0
+      else 7

     val liveBrokers = metadataInstance.liveOrShuttingDownBrokers.iterator.map { broker =>
       val endpoints = if (updateMetadataRequestVersion == 0) {
@@ -648,10 +635,7 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig,
     val metadataVersion = metadataVersionProvider.apply()
     val stopReplicaRequestVersion: Short =
       if (metadataVersion.isAtLeast(IBP_3_4_IV0)) 4
-      else if (metadataVersion.isAtLeast(IBP_2_6_IV0)) 3
-      else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 2
-      else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 1
-      else 0
+      else 3

     def responseCallback(brokerId: Int, isPartitionDeleted: TopicPartition => Boolean)
                         (response: AbstractResponse): Unit = {
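The collapsed cascades leave only the floors a 3.0-IV1+ broker can actually hit. The surviving pattern, sketched here in Java for illustration (the Scala original is above; the local variable name is an assumption):

    short leaderAndIsrRequestVersion;
    if (metadataVersion.isAtLeast(MetadataVersion.IBP_3_4_IV0))
        leaderAndIsrRequestVersion = 7;
    else if (metadataVersion.isAtLeast(MetadataVersion.IBP_3_2_IV0))
        leaderAndIsrRequestVersion = 6;
    else
        leaderAndIsrRequestVersion = 5;  // the 3.0-IV1 baseline guarantees at least v5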

KafkaController.scala

@@ -113,7 +113,6 @@ class KafkaController(val config: KafkaConfig,
   private val brokerInfo = initialBrokerInfo
   @volatile private var _brokerEpoch = initialBrokerEpoch

-  private val isAlterPartitionEnabled = config.interBrokerProtocolVersion.isAlterPartitionSupported
   private val stateChangeLogger = new StateChangeLogger(config.brokerId, inControllerContext = true, None)
   val controllerContext = new ControllerContext
   var controllerChannelManager = new ControllerChannelManager(
@@ -265,7 +264,7 @@ class KafkaController(val config: KafkaConfig,
    * This ensures another controller election will be triggered and there will always be an actively serving controller
    */
   private def onControllerFailover(): Unit = {
-    maybeSetupFeatureVersioning()
+    enableFeatureVersioning()

     info("Registering handlers")
@@ -437,47 +436,6 @@ class KafkaController(val config: KafkaConfig,
     }
   }

-  /**
-   * Disables the feature versioning system (KIP-584).
-   *
-   * Sets up the FeatureZNode with disabled status. This status means the feature versioning system
-   * (KIP-584) is disabled, and, the finalized features stored in the FeatureZNode are not relevant.
-   * This status should be written by the controller to the FeatureZNode only when the broker
-   * IBP config is less than IBP_2_7_IV0.
-   *
-   * NOTE:
-   * 1. When this method returns, existing finalized features (if any) will be cleared from the
-   *    FeatureZNode.
-   * 2. This method, unlike enableFeatureVersioning() need not wait for the FinalizedFeatureCache
-   *    to be updated, because, such updates to the cache (via FinalizedFeatureChangeListener)
-   *    are disabled when IBP config is < than IBP_2_7_IV0.
-   */
-  private def disableFeatureVersioning(): Unit = {
-    val newNode = FeatureZNode(config.interBrokerProtocolVersion, FeatureZNodeStatus.Disabled, Map.empty[String, Short])
-    val (mayBeFeatureZNodeBytes, version) = zkClient.getDataAndVersion(FeatureZNode.path)
-    if (version == ZkVersion.UnknownVersion) {
-      createFeatureZNode(newNode)
-    } else {
-      val existingFeatureZNode = FeatureZNode.decode(mayBeFeatureZNodeBytes.get)
-      if (existingFeatureZNode.status == FeatureZNodeStatus.Disabled &&
-          existingFeatureZNode.features.nonEmpty) {
-        warn(s"FeatureZNode at path: ${FeatureZNode.path} with disabled status" +
-             s" contains non-empty features: ${existingFeatureZNode.features}")
-      }
-      if (!newNode.equals(existingFeatureZNode)) {
-        updateFeatureZNode(newNode)
-      }
-    }
-  }
-
-  private def maybeSetupFeatureVersioning(): Unit = {
-    if (config.isFeatureVersioningSupported) {
-      enableFeatureVersioning()
-    } else {
-      disableFeatureVersioning()
-    }
-  }
-
   private def scheduleAutoLeaderRebalanceTask(delay: Long, unit: TimeUnit): Unit = {
     kafkaScheduler.scheduleOnce("auto-leader-rebalance-task",
       () => eventManager.put(AutoPreferredReplicaLeaderElection),
@@ -503,8 +461,6 @@ class KafkaController(val config: KafkaConfig,
     // stop token expiry check scheduler
     tokenCleanScheduler.shutdown()

-    // de-register partition ISR listener for on-going partition reassignment task
-    unregisterPartitionReassignmentIsrChangeHandlers()
     // shutdown partition state machine
     partitionStateMachine.shutdown()
     zkClient.unregisterZNodeChildChangeHandler(topicChangeHandler.path)
@@ -828,11 +784,6 @@ class KafkaController(val config: KafkaConfig,
         stopRemovedReplicasOfReassignedPartition(topicPartition, unneededReplicas)
       }

-      if (!isAlterPartitionEnabled) {
-        val reassignIsrChangeHandler = new PartitionReassignmentIsrChangeHandler(eventManager, topicPartition)
-        zkClient.registerZNodeChangeHandler(reassignIsrChangeHandler)
-      }
-
       controllerContext.partitionsBeingReassigned.add(topicPartition)
     }
@@ -1121,22 +1072,9 @@ class KafkaController(val config: KafkaConfig,
     }
   }

-  private def unregisterPartitionReassignmentIsrChangeHandlers(): Unit = {
-    if (!isAlterPartitionEnabled) {
-      controllerContext.partitionsBeingReassigned.foreach { tp =>
-        val path = TopicPartitionStateZNode.path(tp)
-        zkClient.unregisterZNodeChangeHandler(path)
-      }
-    }
-  }
-
   private def removePartitionFromReassigningPartitions(topicPartition: TopicPartition,
                                                        assignment: ReplicaAssignment): Unit = {
     if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) {
-      if (!isAlterPartitionEnabled) {
-        val path = TopicPartitionStateZNode.path(topicPartition)
-        zkClient.unregisterZNodeChangeHandler(path)
-      }
       maybeRemoveFromZkReassignment((tp, replicas) => tp == topicPartition && replicas == assignment.replicas)
       controllerContext.partitionsBeingReassigned.remove(topicPartition)
     } else {
@@ -1566,7 +1504,6 @@ class KafkaController(val config: KafkaConfig,
     // of the cache are compatible with the supported features of each broker.
     brokersAndEpochs.partition {
       case (broker, _) =>
-        !config.isFeatureVersioningSupported ||
         !featureCache.getFeatureOption.exists(
           latestFinalizedFeatures =>
             BrokerFeatures.hasIncompatibleFeatures(broker.features,
@@ -1677,12 +1614,9 @@ class KafkaController(val config: KafkaConfig,
   private def processTopicIds(topicIdAssignments: Set[TopicIdReplicaAssignment]): Unit = {
     // Create topic IDs for topics missing them if we are using topic IDs
-    // Otherwise, maintain what we have in the topicZNode
-    val updatedTopicIdAssignments = if (config.usesTopicId) {
+    val updatedTopicIdAssignments = {
       val (withTopicIds, withoutTopicIds) = topicIdAssignments.partition(_.topicId.isDefined)
       withTopicIds ++ zkClient.setTopicIds(withoutTopicIds, controllerContext.epochZkVersion)
-    } else {
-      topicIdAssignments
     }

     // Add topic IDs to controller context

GroupMetadataManager.scala

@ -43,7 +43,6 @@ import org.apache.kafka.common.{TopicIdPartition, TopicPartition}
import org.apache.kafka.coordinator.group.{OffsetAndMetadata, OffsetConfig}
import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, OffsetCommitKey, OffsetCommitValue, GroupMetadataKey => GroupMetadataKeyData}
import org.apache.kafka.server.common.{MetadataVersion, RequestLocal}
import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_1_IV0, IBP_2_1_IV0, IBP_2_1_IV1, IBP_2_3_IV0}
import org.apache.kafka.server.metrics.KafkaMetricsGroup
import org.apache.kafka.server.storage.log.FetchIsolation
import org.apache.kafka.server.util.KafkaScheduler
@ -245,7 +244,7 @@ class GroupMetadataManager(brokerId: Int,
val timestampType = TimestampType.CREATE_TIME
val timestamp = time.milliseconds()
val key = GroupMetadataManager.groupMetadataKey(group.groupId)
val value = GroupMetadataManager.groupMetadataValue(group, groupAssignment, interBrokerProtocolVersion)
val value = GroupMetadataManager.groupMetadataValue(group, groupAssignment)
val records = {
val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(RecordBatch.CURRENT_MAGIC_VALUE, compression.`type`(),
@ -350,7 +349,7 @@ class GroupMetadataManager(brokerId: Int,
val records = filteredOffsetMetadata.map { case (topicIdPartition, offsetAndMetadata) =>
val key = GroupMetadataManager.offsetCommitKey(groupId, topicIdPartition.topicPartition)
val value = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, interBrokerProtocolVersion)
val value = GroupMetadataManager.offsetCommitValue(offsetAndMetadata)
new SimpleRecord(timestamp, key, value)
}
val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, compression.`type`(), records.asJava))
@ -1084,17 +1083,15 @@ object GroupMetadataManager {
* Generates the payload for offset commit message from given offset and metadata
*
* @param offsetAndMetadata consumer's current offset and metadata
* @param metadataVersion the api version
* @param maxVersion the highest version allowed, we may use a lower version for compatibility reasons
* we serialize with the highest supported non-flexible version until a tagged field is introduced
* or the version is bumped.
* @return payload for offset commit message
*/
def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata,
def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata, maxVersion: Short = 3): Array[Byte] = {
metadataVersion: MetadataVersion): Array[Byte] = {
val version =
if (metadataVersion.isLessThan(IBP_2_1_IV0) || offsetAndMetadata.expireTimestampMs.isPresent) 1.toShort
if (offsetAndMetadata.expireTimestampMs.isPresent) Math.min(1, maxVersion).toShort
else if (metadataVersion.isLessThan(IBP_2_1_IV1)) 2.toShort
else maxVersion
// Serialize with the highest supported non-flexible version
// until a tagged field is introduced or the version is bumped.
else 3.toShort
MessageUtil.toVersionPrefixedBytes(version, new OffsetCommitValue()
.setOffset(offsetAndMetadata.committedOffset)
.setMetadata(offsetAndMetadata.metadata)
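
Reviewer note: the new rule is easier to see in isolation. A minimal, self-contained sketch of the version selection above (names are stand-ins; 3 is the highest supported non-flexible version of OffsetCommitValue):

object OffsetCommitVersionRuleSketch {
  // Mirrors the logic above: v1 is the only version carrying an explicit
  // expire timestamp, so it is forced whenever one is present.
  def choose(expireTimestampPresent: Boolean, maxVersion: Short = 3): Short =
    if (expireTimestampPresent) Math.min(1, maxVersion).toShort
    else maxVersion

  def main(args: Array[String]): Unit = {
    assert(choose(expireTimestampPresent = false) == 3) // highest non-flexible version
    assert(choose(expireTimestampPresent = true) == 1)  // pinned to v1 for the expire timestamp
  }
}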
@ -1111,21 +1108,14 @@ object GroupMetadataManager {
*
* @param groupMetadata current group metadata
* @param assignment the assignment for the rebalancing generation
* @param metadataVersion the api version
* @param version the version to serialize it with, the default is `3`, the highest supported non-flexible version
* until a tagged field is introduced or the version is bumped. The default should always be used
* outside of tests
* @return payload for offset commit message
*/
def groupMetadataValue(groupMetadata: GroupMetadata,
assignment: Map[String, Array[Byte]],
metadataVersion: MetadataVersion): Array[Byte] = {
version: Short = 3): Array[Byte] = {
val version =
if (metadataVersion.isLessThan(IBP_0_10_1_IV0)) 0.toShort
else if (metadataVersion.isLessThan(IBP_2_1_IV0)) 1.toShort
else if (metadataVersion.isLessThan(IBP_2_3_IV0)) 2.toShort
// Serialize with the highest supported non-flexible version
// until a tagged field is introduced or the version is bumped.
else 3.toShort
MessageUtil.toVersionPrefixedBytes(version, new GroupMetadataValue()
.setProtocolType(groupMetadata.protocolType.getOrElse(""))
.setGeneration(groupMetadata.generationId)
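
For readers following the serialization change: MessageUtil.toVersionPrefixedBytes writes a two-byte big-endian version before the message body, which is what lets readers dispatch on the schema version before decoding. A self-contained sketch of that layout (the payload bytes here are placeholders):

import java.nio.ByteBuffer

object VersionPrefixSketch {
  def versionPrefixed(version: Short, body: Array[Byte]): Array[Byte] =
    ByteBuffer.allocate(2 + body.length).putShort(version).put(body).array()

  def main(args: Array[String]): Unit = {
    val value = versionPrefixed(3, Array[Byte](1, 2, 3)) // placeholder body
    assert(ByteBuffer.wrap(value).getShort == 3) // readers check the prefix first
  }
}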

View File

@ -62,7 +62,6 @@ object TransactionMarkerChannelManager {
config.interBrokerListenerName,
config.saslMechanismInterBrokerProtocol,
time,
config.saslInterBrokerHandshakeRequestEnable,
logContext
)
channelBuilder match {
@ -256,9 +255,7 @@ class TransactionMarkerChannelManager(
}.filter { case (_, entries) => !entries.isEmpty }.map { case (node, entries) =>
val markersToSend = entries.asScala.map(_.txnMarkerEntry).asJava
val requestCompletionHandler = new TransactionMarkerRequestCompletionHandler(node.id, txnStateManager, this, entries)
val request = new WriteTxnMarkersRequest.Builder(
val request = new WriteTxnMarkersRequest.Builder(markersToSend)
metadataCache.metadataVersion().writeTxnMarkersRequestVersion(), markersToSend
)
new RequestAndCompletionHandler(
currentTimeMs,

View File

@ -29,7 +29,7 @@ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_
import org.apache.kafka.common.requests.ProduceResponse.RecordError
import org.apache.kafka.common.utils.{PrimitiveRef, Time, Utils}
import org.apache.kafka.common.{InvalidRecordException, KafkaException, TopicPartition, Uuid}
import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch, RequestLocal}
import org.apache.kafka.server.common.{OffsetAndEpoch, RequestLocal}
import org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig
import org.apache.kafka.server.metrics.KafkaMetricsGroup
import org.apache.kafka.server.record.BrokerCompressionType
@ -698,7 +698,6 @@ class UnifiedLog(@volatile var logStartOffset: Long,
*
* @param records The records to append
* @param origin Declares the origin of the append which affects required validations
* @param interBrokerProtocolVersion Inter-broker message protocol version
* @param requestLocal request local instance
* @throws KafkaStorageException If the append fails due to an I/O error.
* @return Information about the appended messages including the first and last offset.
@ -706,11 +705,10 @@ class UnifiedLog(@volatile var logStartOffset: Long,
def appendAsLeader(records: MemoryRecords,
leaderEpoch: Int,
origin: AppendOrigin = AppendOrigin.CLIENT,
interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latestProduction,
requestLocal: RequestLocal = RequestLocal.noCaching,
verificationGuard: VerificationGuard = VerificationGuard.SENTINEL): LogAppendInfo = {
val validateAndAssignOffsets = origin != AppendOrigin.RAFT_LEADER
append(records, origin, interBrokerProtocolVersion, validateAndAssignOffsets, leaderEpoch, Some(requestLocal), verificationGuard, ignoreRecordSize = false)
append(records, origin, validateAndAssignOffsets, leaderEpoch, Some(requestLocal), verificationGuard, ignoreRecordSize = false)
}
/**
@ -721,7 +719,7 @@ class UnifiedLog(@volatile var logStartOffset: Long,
* Also see #appendAsLeader.
*/
private[log] def appendAsLeaderWithRecordVersion(records: MemoryRecords, leaderEpoch: Int, recordVersion: RecordVersion): LogAppendInfo = {
append(records, AppendOrigin.CLIENT, MetadataVersion.latestProduction, true, leaderEpoch, Some(RequestLocal.noCaching),
append(records, AppendOrigin.CLIENT, true, leaderEpoch, Some(RequestLocal.noCaching),
VerificationGuard.SENTINEL, ignoreRecordSize = false, recordVersion.value)
}
@ -735,7 +733,6 @@ class UnifiedLog(@volatile var logStartOffset: Long,
def appendAsFollower(records: MemoryRecords): LogAppendInfo = {
append(records,
origin = AppendOrigin.REPLICATION,
interBrokerProtocolVersion = MetadataVersion.latestProduction,
validateAndAssignOffsets = false,
leaderEpoch = -1,
requestLocal = None,
@ -752,7 +749,6 @@ class UnifiedLog(@volatile var logStartOffset: Long,
*
* @param records The log records to append
* @param origin Declares the origin of the append which affects required validations
* @param interBrokerProtocolVersion Inter-broker message protocol version
* @param validateAndAssignOffsets Should the log assign offsets to this message set or blindly apply what it is given
* @param leaderEpoch The partition's leader epoch which will be applied to messages when offsets are assigned on the leader
* @param requestLocal The request local instance if validateAndAssignOffsets is true
@ -764,7 +760,6 @@ class UnifiedLog(@volatile var logStartOffset: Long,
*/
private def append(records: MemoryRecords,
origin: AppendOrigin,
interBrokerProtocolVersion: MetadataVersion,
validateAndAssignOffsets: Boolean,
leaderEpoch: Int,
requestLocal: Option[RequestLocal],
@ -805,8 +800,7 @@ class UnifiedLog(@volatile var logStartOffset: Long,
config.messageTimestampBeforeMaxMs,
config.messageTimestampAfterMaxMs,
leaderEpoch,
origin,
origin
interBrokerProtocolVersion
)
validator.validateMessagesAndAssignOffsets(offset,
validatorMetricsRecorder,

View File

@ -243,7 +243,6 @@ class KafkaRaftManager[T](
controllerListenerName,
config.saslMechanismControllerProtocol,
time,
config.saslInterBrokerHandshakeRequestEnable,
logContext
)

View File

@ -94,8 +94,6 @@ abstract class AbstractFetcherThread(name: String,
protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch]
protected val isOffsetForLeaderEpochSupported: Boolean
override def shutdown(): Unit = {
initiateShutdown()
inLock(partitionMapLock) {
@ -151,7 +149,7 @@ abstract class AbstractFetcherThread(name: String,
partitionStates.partitionStateMap.forEach { (tp, state) =>
if (state.isTruncating) {
latestEpoch(tp) match {
case Some(epoch) if isOffsetForLeaderEpochSupported =>
case Some(epoch) =>
partitionsWithEpochs += tp -> new EpochData()
.setPartition(tp.partition)
.setCurrentLeaderEpoch(state.currentLeaderEpoch)
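
With every supported MetadataVersion now answering OffsetsForLeaderEpoch, the truncation split above depends only on whether the local log has a leader epoch. A stand-in sketch of the simplified decision:

object TruncationSplitSketch {
  // a partition falls into the epoch-based path iff a latest epoch exists locally
  def usesEpochBasedTruncation(latestEpoch: Option[Int]): Boolean = latestEpoch.isDefined

  def main(args: Array[String]): Unit = {
    assert(usesEpochBasedTruncation(Some(3)))  // goes through OffsetsForLeaderEpoch
    assert(!usesEpochBasedTruncation(None))    // truncates without epoch information
  }
}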

View File

@ -229,9 +229,6 @@ class DefaultAlterPartitionManager(
* supported by the controller. The final decision is taken when the AlterPartitionRequest
* is built in the network client based on the advertised api versions of the controller.
*
* We could use version 2 or above if all the pending changes have an topic id defined;
* otherwise we must use version 1 or below.
*
* @return A tuple containing the AlterPartitionRequest.Builder and a mapping from
* topic id to topic name. This mapping is used in the response handling.
*/
@ -245,9 +242,6 @@ class DefaultAlterPartitionManager(
// the metadata cache is updated after the partition state so it might not know
// yet about a topic id already used here.
val topicNamesByIds = mutable.HashMap[Uuid, String]()
// We can use topic ids only if all the pending changed have one defined and
// we use IBP 2.8 or above.
var canUseTopicIds = metadataVersion.isTopicIdsSupported
val message = new AlterPartitionRequestData()
.setBrokerId(brokerId)
@ -255,7 +249,6 @@ class DefaultAlterPartitionManager(
inflightAlterPartitionItems.groupBy(_.topicIdPartition.topic).foreach { case (topicName, items) =>
val topicId = items.head.topicIdPartition.topicId
canUseTopicIds &= topicId != Uuid.ZERO_UUID
topicNamesByIds(topicId) = topicName
// Both the topic name and the topic id are set here because at this stage
@ -280,8 +273,7 @@ class DefaultAlterPartitionManager(
}
}
// If we cannot use topic ids, the builder will ensure that no version higher than 1 is used.
(new AlterPartitionRequest.Builder(message), topicNamesByIds)
(new AlterPartitionRequest.Builder(message, canUseTopicIds), topicNamesByIds)
}
private def handleAlterPartitionResponse(
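
The dropped canUseTopicIds plumbing relied on topic ids being optional below IBP 2.8; with 3.0-IV1 as the floor every topic carries a real id, so the old predicate is vacuously true. A self-contained restatement (the ids are placeholder strings, not Kafka Uuids):

object AlterPartitionTopicIdSketch {
  val ZeroUuid = "00000000-0000-0000-0000-000000000000"

  // the old gate: ids usable only if none of the pending changes has the zero id
  def canUseTopicIds(pendingTopicIds: Seq[String]): Boolean =
    pendingTopicIds.forall(_ != ZeroUuid)

  def main(args: Array[String]): Unit =
    // at the 3.0-IV1 baseline no partition reaches this path with a zero id
    assert(canUseTopicIds(Seq("hypothetical-topic-id")))
}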

View File

@ -150,14 +150,12 @@ class DefaultApiVersionManager(
}
val apiVersions = if (controllerApiVersions.isDefined) {
ApiVersionsResponse.controllerApiVersions(
finalizedFeatures.metadataVersion().highestSupportedRecordVersion,
controllerApiVersions.get,
listenerType,
enableUnstableLastVersion,
clientTelemetryEnabled)
} else {
ApiVersionsResponse.brokerApiVersions(
finalizedFeatures.metadataVersion().highestSupportedRecordVersion,
listenerType,
enableUnstableLastVersion,
clientTelemetryEnabled)

View File

@ -60,7 +60,6 @@ class BrokerBlockingSender(sourceBroker: BrokerEndPoint,
brokerConfig.interBrokerListenerName,
brokerConfig.saslMechanismInterBrokerProtocol,
time,
brokerConfig.saslInterBrokerHandshakeRequestEnable,
logContext
)
val reconfigurableChannelBuilder = channelBuilder match {

View File

@ -71,8 +71,7 @@ import org.apache.kafka.coordinator.group.{Group, GroupCoordinator}
import org.apache.kafka.coordinator.share.ShareCoordinator
import org.apache.kafka.server.ClientMetricsManager
import org.apache.kafka.server.authorizer._
import org.apache.kafka.server.common.{GroupVersion, MetadataVersion, RequestLocal, TransactionVersion}
import org.apache.kafka.server.common.{GroupVersion, RequestLocal, TransactionVersion}
import org.apache.kafka.server.common.MetadataVersion.{IBP_0_11_0_IV0, IBP_2_3_IV0}
import org.apache.kafka.server.share.context.ShareFetchContext
import org.apache.kafka.server.share.{ErroneousAndValidPartitionData, SharePartitionKey}
import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch
@ -297,12 +296,6 @@ class KafkaApis(val requestChannel: RequestChannel,
if (!authHelper.authorize(request.context, READ, GROUP, offsetCommitRequest.data.groupId)) {
requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception))
CompletableFuture.completedFuture[Unit](())
} else if (offsetCommitRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) {
// Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
// until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard
// the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states.
requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception))
CompletableFuture.completedFuture[Unit](())
} else {
val authorizedTopics = authHelper.filterByAuthorized(
request.context,
@ -1437,13 +1430,7 @@ class KafkaApis(val requestChannel: RequestChannel,
): CompletableFuture[Unit] = {
val joinGroupRequest = request.body[JoinGroupRequest]
if (joinGroupRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) {
if (!authHelper.authorize(request.context, READ, GROUP, joinGroupRequest.data.groupId)) {
// Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
// until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard
// the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states.
requestHelper.sendMaybeThrottle(request, joinGroupRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception))
CompletableFuture.completedFuture[Unit](())
} else if (!authHelper.authorize(request.context, READ, GROUP, joinGroupRequest.data.groupId)) {
requestHelper.sendMaybeThrottle(request, joinGroupRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception))
CompletableFuture.completedFuture[Unit](())
} else {
@ -1467,13 +1454,7 @@ class KafkaApis(val requestChannel: RequestChannel,
): CompletableFuture[Unit] = {
val syncGroupRequest = request.body[SyncGroupRequest]
if (syncGroupRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) {
if (!syncGroupRequest.areMandatoryProtocolTypeAndNamePresent()) {
// Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
// until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard
// the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states.
requestHelper.sendMaybeThrottle(request, syncGroupRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception))
CompletableFuture.completedFuture[Unit](())
} else if (!syncGroupRequest.areMandatoryProtocolTypeAndNamePresent()) {
// Starting from version 5, ProtocolType and ProtocolName fields are mandatory.
requestHelper.sendMaybeThrottle(request, syncGroupRequest.getErrorResponse(Errors.INCONSISTENT_GROUP_PROTOCOL.exception))
CompletableFuture.completedFuture[Unit](())
@ -1536,13 +1517,7 @@ class KafkaApis(val requestChannel: RequestChannel,
def handleHeartbeatRequest(request: RequestChannel.Request): CompletableFuture[Unit] = {
val heartbeatRequest = request.body[HeartbeatRequest]
if (heartbeatRequest.data.groupInstanceId != null && metadataCache.metadataVersion().isLessThan(IBP_2_3_IV0)) {
if (!authHelper.authorize(request.context, READ, GROUP, heartbeatRequest.data.groupId)) {
// Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
// until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard
// the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states.
requestHelper.sendMaybeThrottle(request, heartbeatRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception))
CompletableFuture.completedFuture[Unit](())
} else if (!authHelper.authorize(request.context, READ, GROUP, heartbeatRequest.data.groupId)) {
requestHelper.sendMaybeThrottle(request, heartbeatRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception))
CompletableFuture.completedFuture[Unit](())
} else {
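
All three static-membership guards above were the same check: reject group.instance.id when the IBP predates 2.3. Since every supported metadata version is now at least 3.0-IV1, the predicate can never fire, which is why the branches are deleted rather than rewritten. A sketch with hypothetical ordinals standing in for MetadataVersion comparisons:

object StaticMembershipGateSketch {
  // hypothetical ordinals; only their relative order matters
  val IBP_2_3_IV0 = 10
  val IBP_3_0_IV1 = 20

  def rejectedByOldGate(metadataVersion: Int, groupInstanceId: Option[String]): Boolean =
    groupInstanceId.isDefined && metadataVersion < IBP_2_3_IV0

  def main(args: Array[String]): Unit =
    assert(!rejectedByOldGate(IBP_3_0_IV1, Some("consumer-1"))) // gate is dead code now
}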
@ -1966,7 +1941,6 @@ class KafkaApis(val requestChannel: RequestChannel,
}
def handleEndTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(IBP_0_11_0_IV0)
val endTxnRequest = request.body[EndTxnRequest]
val transactionalId = endTxnRequest.data.transactionalId
@ -2010,7 +1984,6 @@ class KafkaApis(val requestChannel: RequestChannel,
}
def handleWriteTxnMarkersRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(IBP_0_11_0_IV0)
// We are checking for AlterCluster permissions first. If it is not present, we are authorizing cluster operation
// The latter will throw an exception if it is denied.
if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME, logIfDenied = false)) {
@ -2183,13 +2156,7 @@ class KafkaApis(val requestChannel: RequestChannel,
requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
}
def ensureInterBrokerVersion(version: MetadataVersion): Unit = {
if (metadataCache.metadataVersion().isLessThan(version))
throw new UnsupportedVersionException(s"metadata.version: ${metadataCache.metadataVersion()} is less than the required version: ${version}")
}
def handleAddPartitionsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(IBP_0_11_0_IV0)
val addPartitionsToTxnRequest =
if (request.context.apiVersion() < 4)
request.body[AddPartitionsToTxnRequest].normalizeRequest()
@ -2302,7 +2269,6 @@ class KafkaApis(val requestChannel: RequestChannel,
}
def handleAddOffsetsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(IBP_0_11_0_IV0)
val addOffsetsToTxnRequest = request.body[AddOffsetsToTxnRequest]
val transactionalId = addOffsetsToTxnRequest.data.transactionalId
val groupId = addOffsetsToTxnRequest.data.groupId
@ -2356,7 +2322,6 @@ class KafkaApis(val requestChannel: RequestChannel,
request: RequestChannel.Request,
requestLocal: RequestLocal
): CompletableFuture[Unit] = {
ensureInterBrokerVersion(IBP_0_11_0_IV0)
val txnOffsetCommitRequest = request.body[TxnOffsetCommitRequest]
def sendResponse(response: TxnOffsetCommitResponse): Unit = {
@ -3279,8 +3244,6 @@ class KafkaApis(val requestChannel: RequestChannel,
sendResponseCallback(Left(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED)))
} else if (!zkSupport.controller.isActive) {
sendResponseCallback(Left(new ApiError(Errors.NOT_CONTROLLER)))
} else if (!config.isFeatureVersioningSupported) {
sendResponseCallback(Left(new ApiError(Errors.INVALID_REQUEST, "Feature versioning system is disabled.")))
} else {
zkSupport.controller.updateFeatures(updateFeaturesRequest, sendResponseCallback)
}
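
ensureInterBrokerVersion(IBP_0_11_0_IV0) protected the transactional APIs from brokers that predate exactly-once support; at a 3.0-IV1 floor the throw is unreachable, so the helper and all five call sites go away together. For reference, a sketch of the invariant that makes it safe (ordinals again stand in for MetadataVersion):

object InterBrokerVersionGateSketch {
  val IBP_0_11_0_IV0 = 1
  val IBP_3_0_IV1 = 30 // new minimum

  def ensureInterBrokerVersion(current: Int, required: Int): Unit =
    if (current < required)
      throw new UnsupportedOperationException(s"metadata.version $current < required $required")

  def main(args: Array[String]): Unit =
    ensureInterBrokerVersion(IBP_3_0_IV1, IBP_0_11_0_IV0) // never throws at the new baseline
}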

View File

@ -24,12 +24,12 @@ import kafka.cluster.EndPoint
import kafka.utils.{CoreUtils, Logging}
import kafka.utils.Implicits._
import org.apache.kafka.common.Reconfigurable
import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource, SaslConfigs, TopicConfig}
import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource, TopicConfig}
import org.apache.kafka.common.config.ConfigDef.ConfigKey
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
import org.apache.kafka.common.config.types.Password
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.record.{CompressionType, TimestampType}
import org.apache.kafka.common.record.TimestampType
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.utils.Utils
@ -43,7 +43,6 @@ import org.apache.kafka.security.authorizer.AuthorizerUtils
import org.apache.kafka.server.ProcessRole
import org.apache.kafka.server.authorizer.Authorizer
import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion._
import org.apache.kafka.server.config.{AbstractKafkaConfig, DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ShareCoordinatorConfig, ZkConfigs}
import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig
import org.apache.kafka.server.metrics.MetricConfigs
@ -426,9 +425,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
/** ********* Controlled shutdown configuration ***********/
val controlledShutdownEnable = getBoolean(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG)
/** ********* Feature configuration ***********/
def isFeatureVersioningSupported = interBrokerProtocolVersion.isFeatureVersioningSupported
/** New group coordinator configs */
val isNewGroupCoordinatorEnabled = getBoolean(GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG)
val groupCoordinatorRebalanceProtocols = {
@ -475,7 +471,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
def interBrokerListenerName = getInterBrokerListenerNameAndSecurityProtocol._1
def interBrokerSecurityProtocol = getInterBrokerListenerNameAndSecurityProtocol._2
def saslMechanismInterBrokerProtocol = getString(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG)
val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion.isSaslInterBrokerHandshakeRequestEnabled
/** ********* DelegationToken Configuration **************/
val delegationTokenSecretKey = getPassword(DelegationTokenManagerConfigs.DELEGATION_TOKEN_SECRET_KEY_CONFIG)
@ -634,10 +629,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
}
}
// Topic IDs are used with all self-managed quorum clusters and ZK cluster with IBP greater than or equal to 2.8
def usesTopicId: Boolean =
usesSelfManagedQuorum || interBrokerProtocolVersion.isTopicIdsSupported
validateValues()
private def validateValues(): Unit = {
@ -771,14 +762,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
s"${SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG} cannot use the nonroutable meta-address 0.0.0.0. "+
s"Use a routable IP address.")
if (groupCoordinatorConfig.offsetTopicCompressionType == CompressionType.ZSTD)
require(interBrokerProtocolVersion.highestSupportedRecordVersion().value >= IBP_2_1_IV0.highestSupportedRecordVersion().value,
"offsets.topic.compression.codec zstd can only be used when inter.broker.protocol.version " +
s"is set to version ${IBP_2_1_IV0.shortVersion} or higher")
val interBrokerUsesSasl = interBrokerSecurityProtocol == SecurityProtocol.SASL_PLAINTEXT || interBrokerSecurityProtocol == SecurityProtocol.SASL_SSL
require(!interBrokerUsesSasl || saslInterBrokerHandshakeRequestEnable || saslMechanismInterBrokerProtocol == SaslConfigs.GSSAPI_MECHANISM,
s"Only GSSAPI mechanism is supported for inter-broker communication with SASL when inter.broker.protocol.version is set to $interBrokerProtocolVersionString")
require(!interBrokerUsesSasl || saslEnabledMechanisms(interBrokerListenerName).contains(saslMechanismInterBrokerProtocol),
s"${BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG} must be included in ${BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG} when SASL is used for inter-broker communication")
require(queuedMaxBytes <= 0 || queuedMaxBytes >= socketRequestMaxBytes,
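
Both deleted require blocks guard states impossible at the new baseline: ZSTD on the offsets topic needed IBP 2.1 or newer, and the GSSAPI-only SASL restriction applied only when the SASL inter-broker handshake was disabled (IBP below 0.10). A sketch of the now-vacuous ZSTD constraint, with hypothetical ordinals standing in for version comparisons:

object ZstdOffsetsTopicSketch {
  // hypothetical ordinals standing in for MetadataVersion comparisons
  val IBP_2_1_IV0 = 21
  val IBP_3_0_IV1 = 30

  def zstdAllowed(interBrokerProtocolVersion: Int): Boolean =
    interBrokerProtocolVersion >= IBP_2_1_IV0

  def main(args: Array[String]): Unit =
    assert(zstdAllowed(IBP_3_0_IV1)) // true for every version at or above the new baseline
}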

View File

@ -157,7 +157,6 @@ class NodeToControllerChannelManagerImpl(
controllerInfo.listenerName,
controllerInfo.saslMechanism,
time,
config.saslInterBrokerHandshakeRequestEnable,
logContext
)
channelBuilder match {

View File

@ -63,7 +63,7 @@ class RemoteLeaderEndPoint(logPrefix: String,
private val maxBytes = brokerConfig.replicaFetchResponseMaxBytes
private val fetchSize = brokerConfig.replicaFetchMaxBytes
override def isTruncationOnFetchSupported: Boolean = metadataVersionSupplier().isTruncationOnFetchSupported
override def isTruncationOnFetchSupported: Boolean = true
override def initiateClose(): Unit = blockingSender.initiateClose()
@ -143,8 +143,7 @@ class RemoteLeaderEndPoint(logPrefix: String,
topic.partitions.add(epochData)
}
val epochRequest = OffsetsForLeaderEpochRequest.Builder.forFollower(
val epochRequest = OffsetsForLeaderEpochRequest.Builder.forFollower(topics, brokerConfig.brokerId)
metadataVersionSupplier().offsetForLeaderEpochRequestVersion, topics, brokerConfig.brokerId)
debug(s"Sending offset for leader epoch request $epochRequest")
try {

View File

@ -169,8 +169,6 @@ class ReplicaAlterLogDirsThread(name: String,
}
}
override protected val isOffsetForLeaderEpochSupported: Boolean = true
/**
* Truncate the log for each partition based on current replica's returned epoch and offset.
*

View File

@ -47,8 +47,6 @@ class ReplicaFetcherThread(name: String,
// Visible for testing
private[server] val partitionsWithNewHighWatermark = mutable.Buffer[TopicPartition]()
override protected val isOffsetForLeaderEpochSupported: Boolean = metadataVersionSupplier().isOffsetForLeaderEpochSupported
override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = {
replicaMgr.localLogOrException(topicPartition).latestEpoch
}

View File

@ -57,7 +57,6 @@ import org.apache.kafka.metadata.LeaderAndIsr
import org.apache.kafka.metadata.LeaderConstants.NO_LEADER
import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, common}
import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition, TopicOptionalIdPartition}
import org.apache.kafka.server.common.MetadataVersion._
import org.apache.kafka.server.metrics.KafkaMetricsGroup
import org.apache.kafka.server.network.BrokerEndPoint
import org.apache.kafka.server.purgatory.{DelayedOperationKey, DelayedOperationPurgatory, TopicPartitionOperationKey}
@ -340,13 +339,9 @@ class ReplicaManager(val config: KafkaConfig,
private var logDirFailureHandler: LogDirFailureHandler = _
private class LogDirFailureHandler(name: String, haltBrokerOnDirFailure: Boolean) extends ShutdownableThread(name) {
private class LogDirFailureHandler(name: String) extends ShutdownableThread(name) {
override def doWork(): Unit = {
val newOfflineLogDir = logDirFailureChannel.takeNextOfflineLogDir()
if (haltBrokerOnDirFailure) {
fatal(s"Halting broker because dir $newOfflineLogDir is offline")
Exit.halt(1)
}
handleLogDirFailure(newOfflineLogDir)
}
}
@ -412,11 +407,7 @@ class ReplicaManager(val config: KafkaConfig,
scheduler.schedule("isr-expiration", () => maybeShrinkIsr(), 0L, config.replicaLagTimeMaxMs / 2)
scheduler.schedule("shutdown-idle-replica-alter-log-dirs-thread", () => shutdownIdleReplicaAlterLogDirsThread(), 0L, 10000L)
// If inter-broker protocol (IBP) < 1.0, the controller will send LeaderAndIsrRequest V0 which does not include isNew field.
logDirFailureHandler = new LogDirFailureHandler("LogDirFailureHandler")
// In this case, the broker receiving the request cannot determine whether it is safe to create a partition if a log directory has failed.
// Thus, we choose to halt the broker on any log directory failure if IBP < 1.0
val haltBrokerOnFailure = metadataCache.metadataVersion().isLessThan(IBP_1_0_IV0)
logDirFailureHandler = new LogDirFailureHandler("LogDirFailureHandler", haltBrokerOnFailure)
logDirFailureHandler.start()
addPartitionsToTxnManager.foreach(_.start())
remoteLogManager.foreach(rlm => rlm.setDelayedOperationPurgatory(delayedRemoteListOffsetsPurgatory))
@ -2562,7 +2553,7 @@ class ReplicaManager(val config: KafkaConfig,
* OffsetForLeaderEpoch request.
*/
protected def initialFetchOffset(log: UnifiedLog): Long = {
if (metadataCache.metadataVersion().isTruncationOnFetchSupported && log.latestEpoch.nonEmpty)
if (log.latestEpoch.nonEmpty)
log.logEndOffset
else
log.highWatermark
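
initialFetchOffset reads better after the change as a pure function of the log: truncation on fetch is always supported, so only the presence of a local leader epoch decides between the log end offset and the high watermark. A self-contained restatement (LogState is a hypothetical stand-in for UnifiedLog):

object InitialFetchOffsetSketch {
  final case class LogState(latestEpoch: Option[Int], logEndOffset: Long, highWatermark: Long)

  def initialFetchOffset(log: LogState): Long =
    if (log.latestEpoch.nonEmpty) log.logEndOffset // truncate on fetch from the log end
    else log.highWatermark                         // no epoch info: fall back to the HW

  def main(args: Array[String]): Unit = {
    assert(initialFetchOffset(LogState(Some(7), 100L, 90L)) == 100L)
    assert(initialFetchOffset(LogState(None, 100L, 90L)) == 90L)
  }
}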

View File

@ -213,7 +213,7 @@ class ZkAdminManager(val config: KafkaConfig,
CreatePartitionsMetadata(topic.name, assignments.keySet)
} else {
controllerMutationQuota.record(assignments.size)
adminZkClient.createTopicWithAssignment(topic.name, configs, assignments, validate = false, config.usesTopicId)
adminZkClient.createTopicWithAssignment(topic.name, configs, assignments, validate = false)
populateIds(includeConfigsAndMetadata, topic.name)
CreatePartitionsMetadata(topic.name, assignments.keySet)
}

View File

@ -314,7 +314,7 @@ object StorageTool extends Logging {
formatParser.addArgument("--release-version", "-r")
.action(store())
.help(s"The release version to use for the initial feature settings. The minimum is " +
s"${MetadataVersion.IBP_3_0_IV0}; the default is ${MetadataVersion.LATEST_PRODUCTION}")
s"${MetadataVersion.IBP_3_0_IV1}; the default is ${MetadataVersion.LATEST_PRODUCTION}")
formatParser.addArgument("--feature", "-f")
.help("The setting to use for a specific feature, in feature=level format. For example: `kraft.version=1`.")
@ -347,7 +347,7 @@ object StorageTool extends Logging {
versionMappingParser.addArgument("--release-version", "-r")
.action(store())
.help(s"The release version to use for the corresponding feature mapping. The minimum is " +
s"${MetadataVersion.IBP_3_0_IV0}; the default is ${MetadataVersion.LATEST_PRODUCTION}")
s"${MetadataVersion.IBP_3_0_IV1}; the default is ${MetadataVersion.LATEST_PRODUCTION}")
}
private def addFeatureDependenciesParser(subparsers: Subparsers): Unit = {

View File

@ -39,7 +39,6 @@ import org.apache.kafka.common.test.api.ClusterTestExtensions;
import org.apache.kafka.common.test.api.Type;
import org.apache.kafka.coordinator.group.GroupCoordinatorConfig;
import org.apache.kafka.metadata.BrokerState;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.server.common.RequestLocal;
import org.apache.kafka.server.config.ServerConfigs;
import org.apache.kafka.storage.internals.log.AppendOrigin;
@ -374,7 +373,6 @@ public class DeleteTopicTest {
),
0,
AppendOrigin.CLIENT,
MetadataVersion.LATEST_PRODUCTION,
RequestLocal.noCaching(),
VerificationGuard.SENTINEL
);

View File

@ -260,8 +260,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
private def createSelector(): Selector = {
val channelBuilder = ChannelBuilders.clientChannelBuilder(securityProtocol,
JaasContext.Type.CLIENT, new TestSecurityConfig(clientConfig), null, kafkaClientSaslMechanism,
JaasContext.Type.CLIENT, new TestSecurityConfig(clientConfig), null, kafkaClientSaslMechanism, time, new LogContext())
time, true, new LogContext())
NetworkTestUtils.createSelector(channelBuilder, time)
}
@ -270,7 +269,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup {
val config = new TestSecurityConfig(clientConfig)
val jaasContexts = Collections.singletonMap("GSSAPI", JaasContext.loadClientContext(config.values()))
val channelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol,
null, false, kafkaClientSaslMechanism, true, null, null, null, time, new LogContext(),
null, false, kafkaClientSaslMechanism, null, null, null, time, new LogContext(),
_ => org.apache.kafka.test.TestUtils.defaultApiVersionsResponse(ListenerType.ZK_BROKER)) {
override protected def defaultLoginClass(): Class[_ <: Login] = classOf[TestableKerberosLogin]
}

View File

@ -446,7 +446,7 @@ class KRaftClusterTest {
"metadata from testkit", assertThrows(classOf[RuntimeException], () => { "metadata from testkit", assertThrows(classOf[RuntimeException], () => {
new KafkaClusterTestKit.Builder( new KafkaClusterTestKit.Builder(
new TestKitNodes.Builder(). new TestKitNodes.Builder().
setBootstrapMetadataVersion(MetadataVersion.IBP_2_7_IV0). setBootstrapMetadataVersion(MetadataVersion.IBP_3_0_IV1).
setNumBrokerNodes(1). setNumBrokerNodes(1).
setNumControllerNodes(1).build()).build() setNumControllerNodes(1).build()).build()
}).getMessage) }).getMessage)

View File

@ -84,7 +84,6 @@ class AbstractPartitionTest {
alterPartitionListener = createIsrChangeListener()
partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = interBrokerProtocolVersion,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,

View File

@ -35,7 +35,7 @@ import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.{TopicPartition, Uuid}
import org.apache.kafka.coordinator.transaction.TransactionLogConfig
import org.apache.kafka.metadata.LeaderAndIsr
import org.apache.kafka.server.common.{MetadataVersion, RequestLocal}
import org.apache.kafka.server.common.RequestLocal
import org.apache.kafka.server.config.ReplicationConfigs
import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams}
import org.apache.kafka.server.util.MockTime
@ -275,7 +275,6 @@ class PartitionLockTest extends Logging {
logManager.startup(Set.empty)
val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => 1L,
mockTime,
@ -457,8 +456,8 @@ class PartitionLockTest extends Logging {
keepPartitionMetadataFile = true) {
override def appendAsLeader(records: MemoryRecords, leaderEpoch: Int, origin: AppendOrigin,
interBrokerProtocolVersion: MetadataVersion, requestLocal: RequestLocal, verificationGuard: VerificationGuard): LogAppendInfo = {
requestLocal: RequestLocal, verificationGuard: VerificationGuard): LogAppendInfo = {
val appendInfo = super.appendAsLeader(records, leaderEpoch, origin, interBrokerProtocolVersion, requestLocal, verificationGuard)
val appendInfo = super.appendAsLeader(records, leaderEpoch, origin, requestLocal, verificationGuard)
appendSemaphore.acquire()
appendInfo
}

View File

@ -431,7 +431,6 @@ class PartitionTest extends AbstractPartitionTest {
partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1270,7 +1269,6 @@ class PartitionTest extends AbstractPartitionTest {
configRepository.setTopicConfig(topicPartition.topic, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")
partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = interBrokerProtocolVersion,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1361,7 +1359,6 @@ class PartitionTest extends AbstractPartitionTest {
val mockMetadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache])
val partition = spy(new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = interBrokerProtocolVersion,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1593,7 +1590,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1701,7 +1697,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1808,7 +1803,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1900,7 +1894,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -1966,7 +1959,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -2122,7 +2114,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -2205,7 +2196,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.IBP_3_7_IV2,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -2578,12 +2568,11 @@ class PartitionTest extends AbstractPartitionTest {
time = time,
brokerId = brokerId,
brokerEpochSupplier = () => 0,
metadataVersionSupplier = () => MetadataVersion.IBP_3_0_IV0
metadataVersionSupplier = () => MetadataVersion.IBP_3_0_IV1
)
partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = interBrokerProtocolVersion,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -2730,7 +2719,6 @@ class PartitionTest extends AbstractPartitionTest {
// Create new Partition object for same topicPartition
val partition2 = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -2775,7 +2763,6 @@ class PartitionTest extends AbstractPartitionTest {
// Create new Partition object for same topicPartition
val partition2 = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time,
@ -2858,7 +2845,7 @@ class PartitionTest extends AbstractPartitionTest {
def testUpdateAssignmentAndIsr(): Unit = {
val topicPartition = new TopicPartition("test", 1)
val partition = new Partition(
topicPartition, 1000, MetadataVersion.latestTesting, 0, () => defaultBrokerEpoch(0),
topicPartition, 1000, 0, () => defaultBrokerEpoch(0),
Time.SYSTEM, mock(classOf[AlterPartitionListener]), mock(classOf[DelayedOperations]),
mock(classOf[KRaftMetadataCache]), mock(classOf[LogManager]), mock(classOf[AlterPartitionManager]))
@ -2933,7 +2920,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId),
time, time,
@ -2972,7 +2958,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3014,7 +2999,6 @@ class PartitionTest extends AbstractPartitionTest {
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3766,7 +3750,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3812,7 +3795,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3858,7 +3840,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3904,7 +3885,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3950,7 +3930,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -3997,7 +3976,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
@ -4052,7 +4030,6 @@ class PartitionTest extends AbstractPartitionTest {
val spyLogManager = spy(logManager) val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition, val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT,
interBrokerProtocolVersion = MetadataVersion.latestTesting,
localBrokerId = brokerId, localBrokerId = brokerId,
() => defaultBrokerEpoch(brokerId), () => defaultBrokerEpoch(brokerId),
time, time,
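
To make the net effect of these hunks concrete, here is a rough sketch of how a test constructs a Partition once the interBrokerProtocolVersion parameter is gone. The mocks and the literal values (replica lag 1000, broker id 0, epoch supplier returning 1L) are illustrative stand-ins for the fixtures used above, not new API:

    import kafka.cluster.{AlterPartitionListener, DelayedOperations, Partition}
    import kafka.log.LogManager
    import kafka.server.AlterPartitionManager
    import kafka.server.metadata.KRaftMetadataCache
    import org.apache.kafka.common.TopicPartition
    import org.apache.kafka.common.utils.Time
    import org.mockito.Mockito.mock

    // The MetadataVersion argument has simply been dropped; all other
    // constructor parameters are unchanged.
    val partition = new Partition(
      new TopicPartition("test", 1), 1000, 0, () => 1L,
      Time.SYSTEM, mock(classOf[AlterPartitionListener]), mock(classOf[DelayedOperations]),
      mock(classOf[KRaftMetadataCache]), mock(classOf[LogManager]), mock(classOf[AlterPartitionManager]))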

@@ -45,7 +45,6 @@ import org.apache.kafka.common.utils.Utils
 import org.apache.kafka.coordinator.group.{GroupCoordinatorConfig, OffsetAndMetadata, OffsetConfig}
 import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, OffsetCommitValue}
 import org.apache.kafka.server.common.{MetadataVersion, RequestLocal}
-import org.apache.kafka.server.common.MetadataVersion._
 import org.apache.kafka.server.metrics.KafkaYammerMetrics
 import org.apache.kafka.server.storage.log.FetchIsolation
 import org.apache.kafka.server.util.{KafkaScheduler, MockTime}
@@ -1059,22 +1058,20 @@ class GroupMetadataManagerTest {
   }
   @Test
-  def testCurrentStateTimestampForAllGroupMetadataVersions(): Unit = {
+  def testCurrentStateTimestampForAllVersions(): Unit = {
     val generation = 1
     val protocol = "range"
     val memberId = "memberId"
-    for (metadataVersion <- MetadataVersion.VERSIONS) {
-      val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion)
+    for (version <- 0 to 3) {
+      val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId,
+        groupMetadataValueVersion = version.toShort)
       val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time)
-      // GROUP_METADATA_VALUE_SCHEMA_V2 or higher should correctly set the currentStateTimestamp
-      if (metadataVersion.isAtLeast(IBP_2_1_IV0))
-        assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp,
-          s"the metadataVersion $metadataVersion doesn't set the currentStateTimestamp correctly.")
+      if (version >= 2)
+        assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp)
       else
-        assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty,
-          s"the metadataVersion $metadataVersion should not set the currentStateTimestamp.")
+        assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty)
     }
   }
@@ -1083,10 +1080,10 @@ class GroupMetadataManagerTest {
     val generation = 1
     val protocol = "range"
     val memberId = "memberId"
-    val oldMetadataVersions = Array(IBP_0_9_0, IBP_0_10_1_IV0, IBP_2_1_IV0)
-    for (metadataVersion <- oldMetadataVersions) {
-      val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion)
+    for (version <- 0 to 2) {
+      val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId,
+        groupMetadataValueVersion = version.toShort)
       val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time)
       assertEquals(groupId, deserializedGroupMetadata.groupId)
@@ -2477,10 +2474,11 @@ class GroupMetadataManagerTest {
       new TopicPartition("bar", 0) -> 8992L
     )
-    val metadataVersion = IBP_1_1_IV0
+    val offsetCommitValueVersion = 1.toShort
-    val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, metadataVersion = metadataVersion, retentionTimeOpt = Some(100))
+    val groupMetadataValueVersion = 1.toShort
+    val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, offsetCommitValueVersion = offsetCommitValueVersion, retentionTimeOpt = Some(100))
     val memberId = "98098230493"
-    val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion)
+    val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, groupMetadataValueVersion = groupMetadataValueVersion)
     val records = MemoryRecords.withRecords(startOffset, Compression.NONE,
       (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
@@ -2551,34 +2549,17 @@ class GroupMetadataManagerTest {
       time.milliseconds(),
       noExpiration)
-    def verifySerde(metadataVersion: MetadataVersion, expectedOffsetCommitValueVersion: Int): Unit = {
+    val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata)
-      val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion)
     val buffer = ByteBuffer.wrap(bytes)
+    val expectedOffsetCommitValueVersion = 3
     assertEquals(expectedOffsetCommitValueVersion, buffer.getShort(0).toInt)
     val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer)
     assertEquals(offsetAndMetadata.committedOffset, deserializedOffsetAndMetadata.committedOffset)
     assertEquals(offsetAndMetadata.metadata, deserializedOffsetAndMetadata.metadata)
     assertEquals(offsetAndMetadata.commitTimestampMs, deserializedOffsetAndMetadata.commitTimestampMs)
+    val expectedLeaderEpoch = offsetAndMetadata.leaderEpoch
-    // Serialization drops the leader epoch silently if an older inter-broker protocol is in use
-    val expectedLeaderEpoch = if (expectedOffsetCommitValueVersion >= 3)
-      offsetAndMetadata.leaderEpoch
-    else
-      noLeader
     assertEquals(expectedLeaderEpoch, deserializedOffsetAndMetadata.leaderEpoch)
-    }
+    assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata)
-    for (version <- MetadataVersion.VERSIONS) {
-      val expectedSchemaVersion = version match {
-        case v if v.isLessThan(IBP_2_1_IV0) => 1
-        case v if v.isLessThan(IBP_2_1_IV1) => 2
-        case _ => 3
-      }
-      verifySerde(version, expectedSchemaVersion)
-    }
   }
   @Test
@@ -2593,8 +2574,7 @@ class GroupMetadataManagerTest {
       time.milliseconds(),
       OptionalLong.of(time.milliseconds() + 1000))
-    def verifySerde(metadataVersion: MetadataVersion): Unit = {
+    val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata)
-      val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion)
     val buffer = ByteBuffer.wrap(bytes)
     assertEquals(1, buffer.getShort(0).toInt)
@@ -2602,38 +2582,6 @@ class GroupMetadataManagerTest {
     assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata)
   }
-    for (version <- MetadataVersion.VERSIONS)
-      verifySerde(version)
-  }
-  @Test
-  def testSerdeOffsetCommitValueWithNoneExpireTimestamp(): Unit = {
-    val offsetAndMetadata = new OffsetAndMetadata(
-      537L,
-      noLeader,
-      "metadata",
-      time.milliseconds(),
-      noExpiration)
-    def verifySerde(metadataVersion: MetadataVersion): Unit = {
-      val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion)
-      val buffer = ByteBuffer.wrap(bytes)
-      val version = buffer.getShort(0).toInt
-      if (metadataVersion.isLessThan(IBP_2_1_IV0))
-        assertEquals(1, version)
-      else if (metadataVersion.isLessThan(IBP_2_1_IV1))
-        assertEquals(2, version)
-      else
-        assertEquals(3, version)
-      val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer)
-      assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata)
-    }
-    for (version <- MetadataVersion.VERSIONS)
-      verifySerde(version)
-  }
   @Test
   def testSerializeGroupMetadataValueToHighestNonFlexibleVersion(): Unit = {
     val generation = 935
@@ -2942,20 +2890,20 @@ class GroupMetadataManagerTest {
                                                 protocol: String,
                                                 memberId: String,
                                                 assignmentBytes: Array[Byte] = Array.emptyByteArray,
-                                                metadataVersion: MetadataVersion = MetadataVersion.latestTesting): SimpleRecord = {
+                                                groupMetadataValueVersion: Short = 3): SimpleRecord = {
     val memberProtocols = List((protocol, Array.emptyByteArray))
     val member = new MemberMetadata(memberId, Some(groupInstanceId), "clientId", "clientHost", 30000, 10000, protocolType, memberProtocols)
     val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, memberId,
-      if (metadataVersion.isAtLeast(IBP_2_1_IV0)) Some(time.milliseconds()) else None, Seq(member), time)
+      if (groupMetadataValueVersion >= 2.toShort) Some(time.milliseconds()) else None, Seq(member), time)
     val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId)
-    val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), metadataVersion)
+    val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), groupMetadataValueVersion)
     new SimpleRecord(groupMetadataKey, groupMetadataValue)
   }
   private def buildEmptyGroupRecord(generation: Int, protocolType: String): SimpleRecord = {
     val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, Seq.empty, time)
     val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId)
-    val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty, MetadataVersion.latestTesting)
+    val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty)
     new SimpleRecord(groupMetadataKey, groupMetadataValue)
   }
@@ -2999,7 +2947,7 @@ class GroupMetadataManagerTest {
   private def createCommittedOffsetRecords(committedOffsets: Map[TopicPartition, Long],
                                            groupId: String = groupId,
-                                           metadataVersion: MetadataVersion = MetadataVersion.latestTesting,
+                                           offsetCommitValueVersion: Short = 3,
                                            retentionTimeOpt: Option[Long] = None): Seq[SimpleRecord] = {
     committedOffsets.map { case (topicPartition, offset) =>
       val commitTimestamp = time.milliseconds()
@@ -3011,7 +2959,7 @@ class GroupMetadataManagerTest {
         new OffsetAndMetadata(offset, noLeader, "", commitTimestamp, noExpiration)
       }
       val offsetCommitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition)
-      val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion)
+      val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, offsetCommitValueVersion)
      new SimpleRecord(offsetCommitKey, offsetCommitValue)
     }.toSeq
   }
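
For reference, the version-free serde path these hunks converge on can be sketched as follows; the values are illustrative and the imports assume the core test classpath:

    import java.nio.ByteBuffer
    import java.util.{OptionalInt, OptionalLong}
    import kafka.coordinator.group.GroupMetadataManager
    import org.apache.kafka.coordinator.group.OffsetAndMetadata

    // Without a MetadataVersion argument, offsetCommitValue always writes the
    // current schema (version 3) when no expire timestamp is set, and
    // readOffsetMessageValue round-trips it, leader epoch included.
    val offsetAndMetadata = new OffsetAndMetadata(537L, OptionalInt.of(15), "metadata", 1234L, OptionalLong.empty())
    val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata)
    val buffer = ByteBuffer.wrap(bytes)
    assert(buffer.getShort(0) == 3)
    assert(GroupMetadataManager.readOffsetMessageValue(buffer) == offsetAndMetadata)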

@@ -298,10 +298,10 @@ class TransactionMarkerChannelManagerTest {
     assertEquals(1, channelManager.queueForBroker(broker2.id).get.totalNumMarkers(txnTopicPartition1))
     assertEquals(0, channelManager.queueForBroker(broker2.id).get.totalNumMarkers(txnTopicPartition2))
-    val expectedBroker1Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
+    val expectedBroker1Request = new WriteTxnMarkersRequest.Builder(
       asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)),
         new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build()
-    val expectedBroker2Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
+    val expectedBroker2Request = new WriteTxnMarkersRequest.Builder(
       asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build()
     val requests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler =>
@@ -368,10 +368,10 @@ class TransactionMarkerChannelManagerTest {
     assertEquals(1, channelManager.queueForUnknownBroker.totalNumMarkers(txnTopicPartition1))
     assertEquals(1, channelManager.queueForUnknownBroker.totalNumMarkers(txnTopicPartition2))
-    val expectedBroker1Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
+    val expectedBroker1Request = new WriteTxnMarkersRequest.Builder(
       asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)),
         new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build()
-    val expectedBroker2Request = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
+    val expectedBroker2Request = new WriteTxnMarkersRequest.Builder(
       asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build()
     val firstDrainedRequests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler =>
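
In the same spirit, a minimal sketch of building a markers request with the simplified builder (the producer id, epochs and partition are placeholder values):

    import java.util.Collections.singletonList
    import org.apache.kafka.common.TopicPartition
    import org.apache.kafka.common.requests.{TransactionResult, WriteTxnMarkersRequest}

    // The builder no longer takes an explicit request version; it always
    // targets the latest WRITE_TXN_MARKERS version.
    val entry = new WriteTxnMarkersRequest.TxnMarkerEntry(
      1L, 0.toShort, 0, TransactionResult.COMMIT, singletonList(new TopicPartition("topic", 0)))
    val request = new WriteTxnMarkersRequest.Builder(singletonList(entry)).build()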

@@ -24,10 +24,9 @@ import org.apache.kafka.common.TopicPartition
 import org.apache.kafka.common.compress.Compression
 import org.apache.kafka.common.config.TopicConfig
 import org.apache.kafka.common.errors.KafkaStorageException
-import org.apache.kafka.common.record.{ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, RecordVersion, SimpleRecord, TimestampType}
+import org.apache.kafka.common.record.{ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, SimpleRecord, TimestampType}
 import org.apache.kafka.common.utils.{Time, Utils}
 import org.apache.kafka.coordinator.transaction.TransactionLogConfig
-import org.apache.kafka.server.common.MetadataVersion
 import org.apache.kafka.server.util.{MockTime, Scheduler}
 import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache
 import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, EpochEntry, LocalLog, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetMetadata, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetIndex, ProducerStateManager, ProducerStateManagerConfig, SnapshotFile}
@@ -128,7 +127,7 @@ class LogLoaderTest {
       brokerTopicStats = new BrokerTopicStats(),
       logDirFailureChannel = logDirFailureChannel,
       time = time,
-      keepPartitionMetadataFile = config.usesTopicId,
+      keepPartitionMetadataFile = true,
       remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(),
       initialTaskDelayMs = config.logInitialTaskDelayMs) {
@@ -246,70 +245,7 @@ class LogLoaderTest {
   }
   @Test
-  def testProducerSnapshotsRecoveryAfterUncleanShutdownV1(): Unit = {
+  def testProducerSnapshotsRecoveryAfterUncleanShutdown(): Unit = {
-    testProducerSnapshotsRecoveryAfterUncleanShutdown(MetadataVersion.minSupportedFor(RecordVersion.V1).version)
-  }
-  @Test
-  def testProducerSnapshotsRecoveryAfterUncleanShutdownCurrentMessageFormat(): Unit = {
-    testProducerSnapshotsRecoveryAfterUncleanShutdown(MetadataVersion.latestTesting.version)
-  }
-  private def createLog(dir: File,
-                        config: LogConfig,
-                        brokerTopicStats: BrokerTopicStats = brokerTopicStats,
-                        logStartOffset: Long = 0L,
-                        recoveryPoint: Long = 0L,
-                        scheduler: Scheduler = mockTime.scheduler,
-                        time: Time = mockTime,
-                        maxTransactionTimeoutMs: Int = maxTransactionTimeoutMs,
-                        maxProducerIdExpirationMs: Int = producerStateManagerConfig.producerIdExpirationMs,
-                        producerIdExpirationCheckIntervalMs: Int = producerIdExpirationCheckIntervalMs,
-                        lastShutdownClean: Boolean = true): UnifiedLog = {
-    val log = LogTestUtils.createLog(dir, config, brokerTopicStats, scheduler, time, logStartOffset, recoveryPoint,
-      maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), producerIdExpirationCheckIntervalMs, lastShutdownClean)
-    logsToClose = logsToClose :+ log
-    log
-  }
-  private def createLogWithOffsetOverflow(logConfig: LogConfig): (UnifiedLog, LogSegment) = {
-    LogTestUtils.initializeLogDirWithOverflowedSegment(logDir)
-    val log = createLog(logDir, logConfig, recoveryPoint = Long.MaxValue)
-    val segmentWithOverflow = LogTestUtils.firstOverflowSegment(log).getOrElse {
-      throw new AssertionError("Failed to create log with a segment which has overflowed offsets")
-    }
-    (log, segmentWithOverflow)
-  }
-  private def recoverAndCheck(config: LogConfig, expectedKeys: Iterable[Long]): UnifiedLog = {
-    // method is called only in case of recovery from hard reset
-    val recoveredLog = LogTestUtils.recoverAndCheck(logDir, config, expectedKeys, brokerTopicStats, mockTime, mockTime.scheduler)
-    logsToClose = logsToClose :+ recoveredLog
-    recoveredLog
-  }
-  /**
-   * Wrap a single record log buffer with leader epoch.
-   */
-  private def singletonRecordsWithLeaderEpoch(value: Array[Byte],
-                                              key: Array[Byte] = null,
-                                              leaderEpoch: Int,
-                                              offset: Long,
-                                              codec: Compression = Compression.NONE,
-                                              timestamp: Long = RecordBatch.NO_TIMESTAMP,
-                                              magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = {
-    val records = Seq(new SimpleRecord(timestamp, key, value))
-    val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
-    val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset,
-      mockTime.milliseconds, leaderEpoch)
-    records.foreach(builder.append)
-    builder.build()
-  }
-  private def testProducerSnapshotsRecoveryAfterUncleanShutdown(messageFormatVersion: String): Unit = {
     val logProps = new Properties()
     logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "640")
     val logConfig = new LogConfig(logProps)
@@ -413,6 +349,60 @@ class LogLoaderTest {
     log.close()
   }
+  private def createLog(dir: File,
+                        config: LogConfig,
+                        brokerTopicStats: BrokerTopicStats = brokerTopicStats,
+                        logStartOffset: Long = 0L,
+                        recoveryPoint: Long = 0L,
+                        scheduler: Scheduler = mockTime.scheduler,
+                        time: Time = mockTime,
+                        maxTransactionTimeoutMs: Int = maxTransactionTimeoutMs,
+                        maxProducerIdExpirationMs: Int = producerStateManagerConfig.producerIdExpirationMs,
+                        producerIdExpirationCheckIntervalMs: Int = producerIdExpirationCheckIntervalMs,
+                        lastShutdownClean: Boolean = true): UnifiedLog = {
+    val log = LogTestUtils.createLog(dir, config, brokerTopicStats, scheduler, time, logStartOffset, recoveryPoint,
+      maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), producerIdExpirationCheckIntervalMs, lastShutdownClean)
+    logsToClose = logsToClose :+ log
+    log
+  }
+  private def createLogWithOffsetOverflow(logConfig: LogConfig): (UnifiedLog, LogSegment) = {
+    LogTestUtils.initializeLogDirWithOverflowedSegment(logDir)
+    val log = createLog(logDir, logConfig, recoveryPoint = Long.MaxValue)
+    val segmentWithOverflow = LogTestUtils.firstOverflowSegment(log).getOrElse {
+      throw new AssertionError("Failed to create log with a segment which has overflowed offsets")
+    }
+    (log, segmentWithOverflow)
+  }
+  private def recoverAndCheck(config: LogConfig, expectedKeys: Iterable[Long]): UnifiedLog = {
+    // method is called only in case of recovery from hard reset
+    val recoveredLog = LogTestUtils.recoverAndCheck(logDir, config, expectedKeys, brokerTopicStats, mockTime, mockTime.scheduler)
+    logsToClose = logsToClose :+ recoveredLog
+    recoveredLog
+  }
+  /**
+   * Wrap a single record log buffer with leader epoch.
+   */
+  private def singletonRecordsWithLeaderEpoch(value: Array[Byte],
+                                              key: Array[Byte] = null,
+                                              leaderEpoch: Int,
+                                              offset: Long,
+                                              codec: Compression = Compression.NONE,
+                                              timestamp: Long = RecordBatch.NO_TIMESTAMP,
+                                              magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = {
+    val records = Seq(new SimpleRecord(timestamp, key, value))
+    val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
+    val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset,
+      mockTime.milliseconds, leaderEpoch)
+    records.foreach(builder.append)
+    builder.build()
+  }
   @Test
   def testSkipLoadingIfEmptyProducerStateBeforeTruncation(): Unit = {
     val maxTransactionTimeoutMs = 60000
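
The relocated singletonRecordsWithLeaderEpoch helper boils down to the following; the record contents, base offset and leader epoch here are illustrative:

    import java.nio.ByteBuffer
    import scala.jdk.CollectionConverters._
    import org.apache.kafka.common.compress.Compression
    import org.apache.kafka.common.record.{DefaultRecordBatch, MemoryRecords, RecordBatch, SimpleRecord, TimestampType}

    // One record wrapped in a batch that carries an explicit leader epoch (3)
    // at a fixed base offset (5).
    val records = Seq(new SimpleRecord(RecordBatch.NO_TIMESTAMP, "key".getBytes, "value".getBytes))
    val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
    val builder = MemoryRecords.builder(buf, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE,
      TimestampType.CREATE_TIME, 5L, System.currentTimeMillis(), 3)
    records.foreach(builder.append)
    val batch: MemoryRecords = builder.build()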

@@ -23,7 +23,6 @@ import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion
 import org.apache.kafka.common.message.ApiMessageType
 import org.apache.kafka.common.network.ListenerName
 import org.apache.kafka.common.protocol.ApiKeys
-import org.apache.kafka.common.record.RecordVersion
 import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse, RequestUtils}
 import org.apache.kafka.common.utils.Utils
 import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, GroupVersion, MetadataVersion, TransactionVersion}
@@ -95,7 +94,6 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) {
     } else {
       ApiVersionsResponse.intersectForwardableApis(
         ApiMessageType.ListenerType.BROKER,
-        RecordVersion.current,
         NodeApiVersions.create(ApiKeys.controllerApis().asScala.map(ApiVersionsResponse.toApiVersion).asJava).allSupportedApiVersions(),
         enableUnstableLastVersion,
         clientTelemetryEnabled

@@ -343,8 +343,6 @@ class AbstractFetcherManagerTest {
       override protected def logEndOffset(topicPartition: TopicPartition): Long = 1
       override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = Some(new OffsetAndEpoch(1, 0))
-      override protected val isOffsetForLeaderEpochSupported: Boolean = false
     }
   }

@@ -390,43 +390,6 @@ class AbstractFetcherThreadTest {
     assertEquals(leaderState.highWatermark, replicaState.highWatermark)
   }
-  @Test
-  def testTruncateToHighWatermarkIfLeaderEpochRequestNotSupported(): Unit = {
-    val highWatermark = 2L
-    val partition = new TopicPartition("topic", 0)
-    val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) {
-      override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] =
-        throw new UnsupportedOperationException
-      override val isTruncationOnFetchSupported: Boolean = false
-    }
-    val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint)
-    val fetcher = new MockFetcherThread(mockLeaderEndPoint, mockTierStateMachine) {
-      override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
-        assertEquals(highWatermark, truncationState.offset)
-        assertTrue(truncationState.truncationCompleted)
-        super.truncate(topicPartition, truncationState)
-      }
-      override protected val isOffsetForLeaderEpochSupported: Boolean = false
-    }
-    val replicaLog = Seq(
-      mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
-      mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
-      mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
-    val replicaState = PartitionState(replicaLog, leaderEpoch = 5, highWatermark)
-    fetcher.setReplicaState(partition, replicaState)
-    fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), highWatermark, leaderEpoch = 5)))
-    fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState)
-    fetcher.doWork()
-    assertEquals(highWatermark, replicaState.logEndOffset)
-    assertEquals(highWatermark, fetcher.fetchState(partition).get.fetchOffset)
-    assertTrue(fetcher.fetchState(partition).get.isReadyForFetch)
-  }
   @Test
   def testTruncateToHighWatermarkIfLeaderEpochInfoNotAvailable(): Unit = {
     val highWatermark = 2L

@@ -32,7 +32,7 @@ import org.apache.kafka.common.requests.RequestHeader
 import org.apache.kafka.common.requests.{AbstractRequest, AlterPartitionRequest, AlterPartitionResponse}
 import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState}
 import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, MetadataVersion, NodeToControllerChannelManager}
-import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV2, IBP_3_2_IV0, IBP_3_5_IV1}
+import org.apache.kafka.server.common.MetadataVersion.{IBP_3_0_IV1, IBP_3_2_IV0, IBP_3_5_IV1}
 import org.apache.kafka.server.util.{MockScheduler, MockTime}
 import org.apache.kafka.test.TestUtils.assertFutureThrows
 import org.junit.jupiter.api.Assertions._
@@ -99,7 +99,6 @@ class AlterPartitionManagerTest {
       .setTopicName(topic)
       .setTopicId(topicId)
-    if (metadataVersion.isTopicIdsSupported) {
     val newIsrWithBrokerEpoch = new ListBuffer[BrokerState]()
     newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(1).setBrokerEpoch(101))
     newIsrWithBrokerEpoch.append(new BrokerState().setBrokerId(2).setBrokerEpoch(102))
@@ -109,13 +108,6 @@ class AlterPartitionManagerTest {
       .setLeaderEpoch(1)
       .setPartitionEpoch(10)
       .setNewIsrWithEpochs(newIsrWithBrokerEpoch.toList.asJava))
-    } else {
-      topicData.partitions.add(new AlterPartitionRequestData.PartitionData()
-        .setPartitionIndex(0)
-        .setLeaderEpoch(1)
-        .setPartitionEpoch(10)
-        .setNewIsr(List(1, 2, 3).map(Integer.valueOf).asJava))
-    }
     expectedAlterPartitionData.topics.add(topicData)
@@ -148,7 +140,6 @@ class AlterPartitionManagerTest {
   @ParameterizedTest
   @MethodSource(Array("provideMetadataVersions"))
   def testOverwriteWithinBatch(metadataVersion: MetadataVersion): Unit = {
-    val canUseTopicIds = metadataVersion.isAtLeast(MetadataVersion.IBP_2_8_IV0)
     val capture: ArgumentCaptor[AbstractRequest.Builder[AlterPartitionRequest]] = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[AlterPartitionRequest]])
     val callbackCapture: ArgumentCaptor[ControllerRequestCompletionHandler] = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler])
@@ -168,7 +159,7 @@ class AlterPartitionManagerTest {
     val alterPartitionResp = partitionResponse()
     val resp = makeClientResponse(
       response = alterPartitionResp,
-      version = if (canUseTopicIds) ApiKeys.ALTER_PARTITION.latestVersion else 1
+      version = ApiKeys.ALTER_PARTITION.latestVersion
     )
     verify(brokerToController).sendRequest(capture.capture(), callbackCapture.capture())
     callbackCapture.getValue.onComplete(resp)
@@ -422,11 +413,7 @@ class AlterPartitionManagerTest {
   @ParameterizedTest
   @MethodSource(Array("provideMetadataVersions"))
   def testPartitionMissingInResponse(metadataVersion: MetadataVersion): Unit = {
-    val expectedVersion = if (metadataVersion.isTopicIdsSupported) {
+    val expectedVersion = ApiKeys.ALTER_PARTITION.latestVersion
-      ApiKeys.ALTER_PARTITION.latestVersion
-    } else {
-      1.toShort
-    }
     val leaderAndIsr = new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)
     val controlledEpoch = 0
     val brokerEpoch = 2
@@ -486,74 +473,6 @@ class AlterPartitionManagerTest {
     assertFutureThrows(future2, classOf[UnknownServerException])
   }
-  @ParameterizedTest
-  @MethodSource(Array("provideMetadataVersions"))
-  def testPartialTopicIds(metadataVersion: MetadataVersion): Unit = {
-    val canUseTopicIds = metadataVersion.isAtLeast(MetadataVersion.IBP_2_8_IV0)
-    val foo = new TopicIdPartition(Uuid.ZERO_UUID, 0, "foo")
-    val bar = new TopicIdPartition(Uuid.randomUuid(), 0, "bar")
-    val zar = new TopicIdPartition(Uuid.randomUuid(), 0, "zar")
-    val leaderAndIsr = new LeaderAndIsr(1, 1, List(1, 2, 3).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)
-    val controlledEpoch = 0
-    val brokerEpoch = 2
-    val scheduler = new MockScheduler(time)
-    val brokerToController = Mockito.mock(classOf[NodeToControllerChannelManager])
-    val alterPartitionManager = new DefaultAlterPartitionManager(
-      brokerToController,
-      scheduler,
-      time,
-      brokerId,
-      () => brokerEpoch,
-      () => metadataVersion
-    )
-    alterPartitionManager.start()
-    // Submits an alter isr update with zar, which has a topic id.
-    val future1 = alterPartitionManager.submit(zar, leaderAndIsr, controlledEpoch)
-    // The latest version is expected if all the submitted partitions
-    // have topic ids and IBP >= 2.8; version 1 should be used otherwise.
-    val callback1 = verifySendRequest(brokerToController, alterPartitionRequestMatcher(
-      expectedTopicPartitions = Set(zar),
-      expectedVersion = if (canUseTopicIds) ApiKeys.ALTER_PARTITION.latestVersion else 1
-    ))
-    // Submits two additional alter isr changes with foo and bar while the previous one
-    // is still inflight. foo has no topic id, bar has one.
-    val future2 = alterPartitionManager.submit(foo, leaderAndIsr, controlledEpoch)
-    val future3 = alterPartitionManager.submit(bar, leaderAndIsr, controlledEpoch)
-    // Completes the first request. That triggers the next one.
-    callback1.onComplete(makeClientResponse(
-      response = makeAlterPartition(Seq(makeAlterPartitionTopicData(zar, Errors.NONE))),
-      version = if (canUseTopicIds) ApiKeys.ALTER_PARTITION.latestVersion else 1
-    ))
-    assertTrue(future1.isDone)
-    assertFalse(future2.isDone)
-    assertFalse(future3.isDone)
-    // Version 1 is expected because foo does not have a topic id.
-    val callback2 = verifySendRequest(brokerToController, alterPartitionRequestMatcher(
-      expectedTopicPartitions = Set(foo, bar),
-      expectedVersion = 1
-    ))
-    // Completes the second request.
-    callback2.onComplete(makeClientResponse(
-      response = makeAlterPartition(Seq(
-        makeAlterPartitionTopicData(foo, Errors.NONE),
-        makeAlterPartitionTopicData(bar, Errors.NONE),
-      )),
-      version = 1
-    ))
-    assertTrue(future1.isDone)
-    assertTrue(future2.isDone)
-    assertTrue(future3.isDone)
-  }
   private def verifySendRequest(
     brokerToController: NodeToControllerChannelManager,
     expectedRequest: ArgumentMatcher[AbstractRequest.Builder[_ <: AbstractRequest]]
@@ -609,25 +528,6 @@ class AlterPartitionManagerTest {
     )
   }
-  private def makeAlterPartition(
-    topics: Seq[AlterPartitionResponseData.TopicData]
-  ): AlterPartitionResponse = {
-    new AlterPartitionResponse(new AlterPartitionResponseData().setTopics(topics.asJava))
-  }
-  private def makeAlterPartitionTopicData(
-    topicIdPartition: TopicIdPartition,
-    error: Errors
-  ): AlterPartitionResponseData.TopicData = {
-    new AlterPartitionResponseData.TopicData()
-      .setTopicName(topicIdPartition.topic)
-      .setTopicId(topicIdPartition.topicId)
-      .setPartitions(Collections.singletonList(
-        new AlterPartitionResponseData.PartitionData()
-          .setPartitionIndex(topicIdPartition.partition)
-          .setErrorCode(error.code)))
-  }
   private def partitionResponse(
     tp: TopicIdPartition = tp0,
     error: Errors = Errors.NONE,
@@ -660,7 +560,7 @@ object AlterPartitionManagerTest {
     // Supports KIP-704: unclean leader recovery
     IBP_3_2_IV0,
     // Supports KIP-497: alter partition
-    IBP_2_7_IV2
+    IBP_3_0_IV1
   )
 }
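
Since every supported MetadataVersion now implies topic-id support, the request body is always built in its modern shape. A small sketch (broker ids and epochs are placeholders):

    import scala.jdk.CollectionConverters._
    import org.apache.kafka.common.message.AlterPartitionRequestData
    import org.apache.kafka.common.message.AlterPartitionRequestData.BrokerState

    // New ISR entries always carry broker epochs; the legacy setNewIsr path is gone.
    val partitionData = new AlterPartitionRequestData.PartitionData()
      .setPartitionIndex(0)
      .setLeaderEpoch(1)
      .setPartitionEpoch(10)
      .setNewIsrWithEpochs(List(
        new BrokerState().setBrokerId(1).setBrokerEpoch(101),
        new BrokerState().setBrokerId(2).setBrokerEpoch(102)).asJava)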

@@ -409,7 +409,7 @@ class ControllerApisTest {
     assertThrows(classOf[ClusterAuthorizationException], () => {
       controllerApis = createControllerApis(Some(createDenyAllAuthorizer()), new MockController.Builder().build())
       controllerApis.handleAlterPartitionRequest(buildRequest(new AlterPartitionRequest.Builder(
-        new AlterPartitionRequestData(), false).build(0)))
+        new AlterPartitionRequestData()).build(0)))
     })
   }
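
A one-line sketch of the simplified builder call, mirroring the updated test:

    import org.apache.kafka.common.message.AlterPartitionRequestData
    import org.apache.kafka.common.requests.AlterPartitionRequest

    // The canUseTopicIds flag is gone; the builder derives everything from the data.
    val request = new AlterPartitionRequest.Builder(new AlterPartitionRequestData()).build(0)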

@@ -30,7 +30,7 @@ class FinalizedFeatureCacheTest {
   @Test
   def testEmpty(): Unit = {
-    assertTrue(new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, BrokerFeatures.createDefault(true)).getFeatureOption.isEmpty)
+    assertTrue(new ZkMetadataCache(1, MetadataVersion.IBP_3_0_IV1, BrokerFeatures.createDefault(true)).getFeatureOption.isEmpty)
   }
   def asJava(input: Map[String, Short]): java.util.Map[String, java.lang.Short] = {
@@ -45,7 +45,7 @@ class FinalizedFeatureCacheTest {
     val finalizedFeatures = Map[String, Short]("feature_1" -> 4)
-    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures)
+    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_3_0_IV1, brokerFeatures)
     cache.updateFeaturesOrThrow(finalizedFeatures, 10)
     assertTrue(cache.getFeatureOption.isDefined)
     assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures())
@@ -67,7 +67,7 @@ class FinalizedFeatureCacheTest {
     val finalizedFeatures = Map[String, Short]("feature_1" -> 2)
-    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures)
+    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_3_0_IV1, brokerFeatures)
     assertThrows(classOf[FeatureCacheUpdateException], () => cache.updateFeaturesOrThrow(finalizedFeatures, 12))
     // Check that the failed updateOrThrow call did not make any mutations.
@@ -82,7 +82,7 @@ class FinalizedFeatureCacheTest {
     val finalizedFeatures = Map[String, Short]("feature_1" -> 3)
-    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures)
+    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_3_0_IV1, brokerFeatures)
     cache.updateFeaturesOrThrow(finalizedFeatures, 12)
     assertTrue(cache.getFeatureOption.isDefined)
     assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures())
@@ -97,7 +97,7 @@ class FinalizedFeatureCacheTest {
     val finalizedFeatures = Map[String, Short]("feature_1" -> 3)
-    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_2_8_IV1, brokerFeatures)
+    val cache = new ZkMetadataCache(1, MetadataVersion.IBP_3_0_IV1, brokerFeatures)
     cache.updateFeaturesOrThrow(finalizedFeatures, 12)
     assertTrue(cache.getFeatureOption.isDefined)
     assertEquals(asJava(finalizedFeatures), cache.getFeatureOption.get.finalizedFeatures())

@@ -2839,7 +2839,7 @@ class KafkaApisTest extends Logging {
     // This test verifies the response will not be sent prematurely because of calling replicaManager append
     // with no records.
     val topicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0)
-    val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
+    val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(
       asList(
         new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)),
         new TxnMarkerEntry(2, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)),
@@ -2973,7 +2973,6 @@ class KafkaApisTest extends Logging {
     )
     val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(
-      ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
       List(
         new TxnMarkerEntry(
           1L,
@@ -3099,7 +3098,6 @@ class KafkaApisTest extends Logging {
     )
     val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(
-      ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
       List(
         new TxnMarkerEntry(
           1L,
@@ -3225,7 +3223,6 @@ class KafkaApisTest extends Logging {
     val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0)
     val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(
-      ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
       List(
         new TxnMarkerEntry(
           1L,
@@ -9705,7 +9702,7 @@ class KafkaApisTest extends Logging {
   }
   private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = {
-    val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
+    val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder(
       asList(new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions))).build()
     (writeTxnMarkersRequest, buildRequest(writeTxnMarkersRequest))
   }

@@ -38,7 +38,6 @@ import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, Transact
 import org.apache.kafka.network.SocketServerConfigs
 import org.apache.kafka.raft.QuorumConfig
 import org.apache.kafka.server.common.MetadataVersion
-import org.apache.kafka.server.common.MetadataVersion.IBP_0_8_2
 import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms, ZkConfigs}
 import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig
 import org.apache.kafka.server.metrics.MetricConfigs
@@ -613,16 +612,16 @@ class KafkaConfigTest {
     val conf = KafkaConfig.fromProps(props)
     assertEquals(MetadataVersion.latestProduction, conf.interBrokerProtocolVersion)
-    props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.8.2.0")
+    props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "3.0.0-IV1")
     val conf2 = KafkaConfig.fromProps(props)
-    assertEquals(IBP_0_8_2, conf2.interBrokerProtocolVersion)
+    assertEquals(MetadataVersion.IBP_3_0_IV1, conf2.interBrokerProtocolVersion)
-    // check that 0.8.2.0 is the same as 0.8.2.1
-    props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.8.2.1")
+    // check that patch version doesn't affect equality
+    props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "3.0.1-IV1")
     val conf3 = KafkaConfig.fromProps(props)
-    assertEquals(IBP_0_8_2, conf3.interBrokerProtocolVersion)
+    assertEquals(MetadataVersion.IBP_3_0_IV1, conf3.interBrokerProtocolVersion)
-    //check that latest is newer than 0.8.2
+    //check that latest is newer than 3.0.1-IV0
     assertTrue(MetadataVersion.latestTesting.isAtLeast(conf3.interBrokerProtocolVersion))
   }
@@ -1623,15 +1622,6 @@ class KafkaConfigTest {
     }
   }
-  @Test
-  def testInvalidInterBrokerProtocolVersionKRaft(): Unit = {
-    val props = new Properties()
-    props.putAll(kraftProps())
-    props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "2.8")
-    assertEquals("A non-KRaft version 2.8 given for inter.broker.protocol.version. The minimum version is 3.0-IV1",
-      assertThrows(classOf[ConfigException], () => new KafkaConfig(props)).getMessage)
-  }
   @Test
   def testDefaultInterBrokerProtocolVersionKRaft(): Unit = {
     val props = new Properties()
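
For clarity, the parsing behavior the updated assertions rely on, sketched with fromVersionString (the parser behind inter.broker.protocol.version):

    import org.apache.kafka.server.common.MetadataVersion

    // The patch number is ignored when resolving an IBP string, so 3.0.0-IV1
    // and 3.0.1-IV1 resolve to the same MetadataVersion.
    assert(MetadataVersion.fromVersionString("3.0.0-IV1") == MetadataVersion.IBP_3_0_IV1)
    assert(MetadataVersion.fromVersionString("3.0.1-IV1") == MetadataVersion.IBP_3_0_IV1)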

@@ -163,6 +163,4 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint,
       assertEquals(expectedEpoch, fetchState(partition).flatMap(_.lastFetchedEpoch))
     }
   }
-  override protected val isOffsetForLeaderEpochSupported: Boolean = true
 }

@ -22,7 +22,7 @@ import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection
import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.record.RecordBatch
import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse}
import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Assertions._
@ -40,8 +40,7 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest {
val partition = new TopicPartition(topic, 0) val partition = new TopicPartition(topic, 0)
val epochs = offsetForLeaderTopicCollectionFor(partition, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH) val epochs = offsetForLeaderTopicCollectionFor(partition, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH)
val request = OffsetsForLeaderEpochRequest.Builder.forFollower( val request = OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, 1).build()
ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion, epochs, 1).build()
// Unknown topic // Unknown topic
val randomBrokerId = brokers.head.config.brokerId val randomBrokerId = brokers.head.config.brokerId
@ -69,8 +68,7 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest {
def assertResponseErrorForEpoch(error: Errors, brokerId: Int, currentLeaderEpoch: Optional[Integer]): Unit = { def assertResponseErrorForEpoch(error: Errors, brokerId: Int, currentLeaderEpoch: Optional[Integer]): Unit = {
val epochs = offsetForLeaderTopicCollectionFor(topicPartition, 0, val epochs = offsetForLeaderTopicCollectionFor(topicPartition, 0,
currentLeaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)) currentLeaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
val request = OffsetsForLeaderEpochRequest.Builder.forFollower( val request = OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, 1).build()
ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion, epochs, 1).build()
assertResponseError(error, brokerId, request) assertResponseError(error, brokerId, request)
} }
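Both call sites switch to the two-argument forFollower(epochs, replicaId) overload: the explicit request version disappears because every broker a 4.0 broker talks to understands the latest OffsetsForLeaderEpoch version. A rough sketch of the new shape (an empty topic collection stands in for real test data):

import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection;
import org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest;

public class ForFollowerSketch {
    public static void main(String[] args) {
        OffsetForLeaderTopicCollection epochs = new OffsetForLeaderTopicCollection();
        // replicaId 1 marks this as a follower request; the builder now
        // always targets the latest request version.
        OffsetsForLeaderEpochRequest request =
            OffsetsForLeaderEpochRequest.Builder.forFollower(epochs, 1).build();
        System.out.println(request.data());
    }
}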
View File
@ -29,7 +29,6 @@ import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid}
import org.apache.kafka.common.message.{FetchResponseData, UpdateMetadataRequestData} import org.apache.kafka.common.message.{FetchResponseData, UpdateMetadataRequestData}
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.protocol.Errors._
import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, RecordValidationStats, SimpleRecord} import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, RecordValidationStats, SimpleRecord}
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
@ -38,17 +37,16 @@ import org.apache.kafka.common.utils.{LogContext, Time}
import org.apache.kafka.server.BrokerFeatures import org.apache.kafka.server.BrokerFeatures
import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.config.ReplicationConfigs
import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch}
import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0
import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.network.BrokerEndPoint
import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.internals.log.LogAppendInfo
import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.apache.kafka.storage.log.metrics.BrokerTopicStats
import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, Disabled, Test} import org.junit.jupiter.api.{AfterEach, Test}
import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource import org.junit.jupiter.params.provider.ValueSource
import org.mockito.ArgumentCaptor import org.mockito.ArgumentCaptor
import org.mockito.ArgumentMatchers.{any, anyBoolean, anyLong} import org.mockito.ArgumentMatchers.{any, anyBoolean, anyLong}
import org.mockito.Mockito.{mock, never, times, verify, when} import org.mockito.Mockito.{mock, times, verify, when}
import java.nio.charset.StandardCharsets import java.nio.charset.StandardCharsets
import java.util import java.util
@ -130,96 +128,12 @@ class ReplicaFetcherThreadTest {
ApiKeys.FETCH.latestVersion(true), ApiKeys.FETCH.latestVersion(true),
testingVersion.fetchRequestVersion testingVersion.fetchRequestVersion
) )
assertEquals(
ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(true),
testingVersion.offsetForLeaderEpochRequestVersion
)
assertEquals( assertEquals(
ApiKeys.LIST_OFFSETS.latestVersion(true), ApiKeys.LIST_OFFSETS.latestVersion(true),
testingVersion.listOffsetRequestVersion testingVersion.listOffsetRequestVersion
) )
} }
@Disabled("KAFKA-18370")
@Test
def testFetchLeaderEpochRequestIfLastEpochDefinedForSomePartitions(): Unit = {
val config = kafkaConfigNoTruncateOnFetch
//Setup all dependencies
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val leaderEpoch = 5
//Stubs
when(partition.localLogOrException).thenReturn(log)
when(log.logEndOffset).thenReturn(0)
when(log.highWatermark).thenReturn(0)
when(log.latestEpoch)
.thenReturn(Some(leaderEpoch))
.thenReturn(Some(leaderEpoch))
.thenReturn(None) // t2p1 doesn't support epochs
when(log.endOffsetForEpoch(leaderEpoch)).thenReturn(
Some(new OffsetAndEpoch(0, leaderEpoch)))
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse
val offsets = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)).asJava
//Create the fetcher thread
val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork)
// topic 1 supports epoch, t2 doesn't.
thread.addPartitions(Map(
t1p0 -> initialFetchState(Some(topicId1), 0L),
t1p1 -> initialFetchState(Some(topicId2), 0L),
t2p1 -> initialFetchState(Some(topicId2), 0L)))
assertPartitionStates(thread, shouldBeReadyForFetch = false, shouldBeTruncatingLog = true, shouldBeDelayed = false)
//Loop 1
thread.doWork()
assertEquals(1, mockNetwork.epochFetchCount)
assertEquals(1, mockNetwork.fetchCount)
assertPartitionStates(thread, shouldBeReadyForFetch = true, shouldBeTruncatingLog = false, shouldBeDelayed = false)
//Loop 2 we should not fetch epochs
thread.doWork()
assertEquals(1, mockNetwork.epochFetchCount)
assertEquals(2, mockNetwork.fetchCount)
assertPartitionStates(thread, shouldBeReadyForFetch = true, shouldBeTruncatingLog = false, shouldBeDelayed = false)
//Loop 3 we should not fetch epochs
thread.doWork()
assertEquals(1, mockNetwork.epochFetchCount)
assertEquals(3, mockNetwork.fetchCount)
assertPartitionStates(thread, shouldBeReadyForFetch = true, shouldBeTruncatingLog = false, shouldBeDelayed = false)
//Assert that truncateTo is called exactly once per partition (three in total) despite the multiple loops
verify(partition, times(3)).truncateTo(anyLong(), anyBoolean())
}
/** /**
* Assert that all partitions' states are as expected * Assert that all partitions' states are as expected
* *
@ -281,18 +195,12 @@ class ReplicaFetcherThreadTest {
verify(mockBlockingSend).sendRequest(any()) verify(mockBlockingSend).sendRequest(any())
} }
@Disabled("KAFKA-18370")
@Test
def shouldFetchLeaderEpochOnFirstFetchOnlyIfLeaderEpochKnownToBothIbp26(): Unit = {
verifyFetchLeaderEpochOnFirstFetch(IBP_2_6_IV0)
}
@Test @Test
def shouldNotFetchLeaderEpochOnFirstFetchWithTruncateOnFetch(): Unit = { def shouldNotFetchLeaderEpochOnFirstFetchWithTruncateOnFetch(): Unit = {
verifyFetchLeaderEpochOnFirstFetch(MetadataVersion.latestTesting, epochFetchCount = 0) verifyFetchLeaderEpochOnFirstFetch(MetadataVersion.latestTesting, epochFetchCount = 0)
} }
private def verifyFetchLeaderEpochOnFirstFetch(ibp: MetadataVersion, epochFetchCount: Int = 1): Unit = { private def verifyFetchLeaderEpochOnFirstFetch(ibp: MetadataVersion, epochFetchCount: Int): Unit = {
val props = TestUtils.createBrokerConfig(1) val props = TestUtils.createBrokerConfig(1)
props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, ibp.version) props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, ibp.version)
val config = KafkaConfig.fromProps(props) val config = KafkaConfig.fromProps(props)
@ -355,213 +263,6 @@ class ReplicaFetcherThreadTest {
assertEquals(3, mockNetwork.fetchCount) assertEquals(3, mockNetwork.fetchCount)
} }
@Disabled("KAFKA-18370")
@Test
def shouldTruncateToOffsetSpecifiedInEpochOffsetResponse(): Unit = {
//Create a capture to track what partitions/offsets are truncated
val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
// Setup all the dependencies
val config = kafkaConfigNoTruncateOnFetch
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val leaderEpoch = 5
val initialLEO = 200
//Stubs
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(initialLEO - 1)
when(log.latestEpoch).thenReturn(Some(leaderEpoch))
when(log.endOffsetForEpoch(leaderEpoch)).thenReturn(
Some(new OffsetAndEpoch(initialLEO, leaderEpoch)))
when(log.logEndOffset).thenReturn(initialLEO)
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation
val offsetsReply = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 156),
t2p1 -> newOffsetForLeaderPartitionResult(t2p1, leaderEpoch, 172)).asJava
//Create the thread
val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t2p1 -> initialFetchState(Some(topicId2), 0L)))
//Run it
thread.doWork()
//We should have truncated to the offsets in the response
verify(partition, times(2)).truncateTo(truncateToCapture.capture(), anyBoolean())
assertTrue(truncateToCapture.getAllValues.asScala.contains(156),
"Expected " + t1p0 + " to truncate to offset 156 (truncation offsets: " + truncateToCapture.getAllValues + ")")
assertTrue(truncateToCapture.getAllValues.asScala.contains(172),
"Expected " + t2p1 + " to truncate to offset 172 (truncation offsets: " + truncateToCapture.getAllValues + ")")
}
@Disabled("KAFKA-18370")
@Test
def shouldTruncateToOffsetSpecifiedInEpochOffsetResponseIfFollowerHasNoMoreEpochs(): Unit = {
// Create a capture to track what partitions/offsets are truncated
val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
// Setup all the dependencies
val config = kafkaConfigNoTruncateOnFetch
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val leaderEpochAtFollower = 5
val leaderEpochAtLeader = 4
val initialLEO = 200
//Stubs
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(initialLEO - 3)
when(log.latestEpoch).thenReturn(Some(leaderEpochAtFollower))
when(log.endOffsetForEpoch(leaderEpochAtLeader)).thenReturn(None)
when(log.logEndOffset).thenReturn(initialLEO)
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation
val offsetsReply = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpochAtLeader, 156),
t2p1 -> newOffsetForLeaderPartitionResult(t2p1, leaderEpochAtLeader, 202)).asJava
//Create the thread
val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t2p1 -> initialFetchState(Some(topicId2), 0L)))
//Run it
thread.doWork()
//We should have truncated to the offsets in the response
verify(partition, times(2)).truncateTo(truncateToCapture.capture(), anyBoolean())
assertTrue(truncateToCapture.getAllValues.asScala.contains(156),
"Expected " + t1p0 + " to truncate to offset 156 (truncation offsets: " + truncateToCapture.getAllValues + ")")
assertTrue(truncateToCapture.getAllValues.asScala.contains(initialLEO),
"Expected " + t2p1 + " to truncate to offset " + initialLEO +
" (truncation offsets: " + truncateToCapture.getAllValues + ")")
}
@Disabled("KAFKA-18370")
@Test
def shouldFetchLeaderEpochSecondTimeIfLeaderRepliesWithEpochNotKnownToFollower(): Unit = {
// Create a capture to track what partitions/offsets are truncated
val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
val config = kafkaConfigNoTruncateOnFetch
// Setup all dependencies
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val initialLEO = 200
// Stubs
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(initialLEO - 2)
when(log.latestEpoch).thenReturn(Some(5))
when(log.endOffsetForEpoch(4)).thenReturn(
Some(new OffsetAndEpoch(120, 3)))
when(log.endOffsetForEpoch(3)).thenReturn(
Some(new OffsetAndEpoch(120, 3)))
when(log.logEndOffset).thenReturn(initialLEO)
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
// Define the offsets for the OffsetsForLeaderEpochResponse
val offsets = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, 4, 155),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, 4, 143)).asJava
// Create the fetcher thread
val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L)))
// Loop 1 -- both topic partitions will need to fetch another leader epoch
thread.doWork()
assertEquals(1, mockNetwork.epochFetchCount)
assertEquals(0, mockNetwork.fetchCount)
// Loop 2 should do the second fetch for both topic partitions because the leader replied with
// epoch 4 while follower knows only about epoch 3
val nextOffsets = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, 3, 101),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, 3, 102)).asJava
mockNetwork.setOffsetsForNextResponse(nextOffsets)
thread.doWork()
assertEquals(2, mockNetwork.epochFetchCount)
assertEquals(1, mockNetwork.fetchCount)
assertTrue(mockNetwork.lastUsedOffsetForLeaderEpochVersion >= 3,
"OffsetsForLeaderEpochRequest version.")
//Loop 3 we should not fetch epochs
thread.doWork()
assertEquals(2, mockNetwork.epochFetchCount)
assertEquals(2, mockNetwork.fetchCount)
verify(partition, times(4)).truncateTo(truncateToCapture.capture(), anyBoolean())
//We should have truncated to the offsets in the second response
assertTrue(truncateToCapture.getAllValues.asScala.contains(102),
"Expected " + t1p1 + " to truncate to offset 102 (truncation offsets: " + truncateToCapture.getAllValues + ")")
assertTrue(truncateToCapture.getAllValues.asScala.contains(101),
"Expected " + t1p0 + " to truncate to offset 101 (truncation offsets: " + truncateToCapture.getAllValues + ")")
}
@Test @Test
def shouldTruncateIfLeaderRepliesWithDivergingEpochNotKnownToFollower(): Unit = { def shouldTruncateIfLeaderRepliesWithDivergingEpochNotKnownToFollower(): Unit = {
@ -853,329 +554,6 @@ class ReplicaFetcherThreadTest {
assertEquals(Some(lastFetchedEpoch), thread.fetchState(t1p0).flatMap(_.lastFetchedEpoch)) assertEquals(Some(lastFetchedEpoch), thread.fetchState(t1p0).flatMap(_.lastFetchedEpoch))
} }
@Disabled("KAFKA-18370")
@Test
def shouldUseLeaderEndOffsetIfInterBrokerVersionBelow20(): Unit = {
// Create a capture to track what partitions/offsets are truncated
val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
val props = TestUtils.createBrokerConfig(1)
props.put(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, "0.11.0")
val config = KafkaConfig.fromProps(props)
// Setup all dependencies
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val initialLEO = 200
// Stubs
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(initialLEO - 2)
when(log.latestEpoch).thenReturn(Some(5))
when(log.endOffsetForEpoch(4)).thenReturn(
Some(new OffsetAndEpoch(120, 3)))
when(log.endOffsetForEpoch(3)).thenReturn(
Some(new OffsetAndEpoch(120, 3)))
when(log.logEndOffset).thenReturn(initialLEO)
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
// Define the offsets for the OffsetsForLeaderEpochResponse with undefined epoch to simulate
// older protocol version
val offsets = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, UNDEFINED_EPOCH, 155),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, UNDEFINED_EPOCH, 143)).asJava
// Create the fetcher thread
val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L)))
// Loop 1 -- both topic partitions will truncate to leader offset even though they don't know
// about leader epoch
thread.doWork()
assertEquals(1, mockNetwork.epochFetchCount)
assertEquals(1, mockNetwork.fetchCount)
assertEquals(0, mockNetwork.lastUsedOffsetForLeaderEpochVersion, "OffsetsForLeaderEpochRequest version.")
//Loop 2 we should not fetch epochs
thread.doWork()
assertEquals(1, mockNetwork.epochFetchCount)
assertEquals(2, mockNetwork.fetchCount)
//We should have truncated to the offsets in the first response
verify(partition, times(2)).truncateTo(truncateToCapture.capture(), anyBoolean())
assertTrue(truncateToCapture.getAllValues.asScala.contains(155),
"Expected " + t1p0 + " to truncate to offset 155 (truncation offsets: " + truncateToCapture.getAllValues + ")")
assertTrue(truncateToCapture.getAllValues.asScala.contains(143),
"Expected " + t1p1 + " to truncate to offset 143 (truncation offsets: " + truncateToCapture.getAllValues + ")")
}
@Disabled("KAFKA-18370")
@Test
def shouldTruncateToInitialFetchOffsetIfLeaderReturnsUndefinedOffset(): Unit = {
//Create a capture to track what partitions/offsets are truncated
val truncated: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
// Setup all the dependencies
val config = kafkaConfigNoTruncateOnFetch
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val initialFetchOffset = 100
//Stubs
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(initialFetchOffset)
when(log.latestEpoch).thenReturn(Some(5))
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation
val offsetsReply = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET)).asJava
//Create the thread
val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), initialFetchOffset)))
//Run it
thread.doWork()
//We should have truncated to initial fetch offset
verify(partition).truncateTo(truncated.capture(), anyBoolean())
assertEquals(initialFetchOffset, truncated.getValue)
}
@Disabled("KAFKA-18370")
@Test
def shouldPollIndefinitelyIfLeaderReturnsAnyException(): Unit = {
//Create a capture to track what partitions/offsets are truncated
val truncated: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
// Setup all the dependencies
val config = kafkaConfigNoTruncateOnFetch
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val leaderEpoch = 5
val highWaterMark = 100
val initialLeo = 300
//Stubs
when(log.highWatermark).thenReturn(highWaterMark)
when(partition.localLogOrException).thenReturn(log)
when(log.latestEpoch).thenReturn(Some(leaderEpoch))
// this is for the last reply with EpochEndOffset(5, 156)
when(log.endOffsetForEpoch(leaderEpoch)).thenReturn(
Some(new OffsetAndEpoch(initialLeo, leaderEpoch)))
when(log.logEndOffset).thenReturn(initialLeo)
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats]))
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse, these are used for truncation
val offsetsReply = mutable.Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, NOT_LEADER_OR_FOLLOWER, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, UNKNOWN_SERVER_ERROR, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET)
).asJava
//Create the thread
val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L)))
//Run thread 4 times
(0 to 3).foreach { _ =>
thread.doWork()
}
//Then should loop continuously while there is no leader
verify(partition, never()).truncateTo(anyLong(), anyBoolean())
//New leader elected and replies
offsetsReply.put(t1p0, newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 156))
thread.doWork()
//Now the final call should have actually done a truncation (to offset 156)
verify(partition).truncateTo(truncated.capture(), anyBoolean())
assertEquals(156, truncated.getValue)
}
@Disabled("KAFKA-18370")
@Test
def shouldMovePartitionsOutOfTruncatingLogState(): Unit = {
val config = kafkaConfigNoTruncateOnFetch
//Setup all stubs
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
val leaderEpoch = 4
//Stub return values
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(0)
when(log.latestEpoch).thenReturn(Some(leaderEpoch))
when(log.endOffsetForEpoch(leaderEpoch)).thenReturn(
Some(new OffsetAndEpoch(0, leaderEpoch)))
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse
val offsetsReply = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)
).asJava
//Create the fetcher thread
val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
//When
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L)))
//Then all partitions should start in a TruncatingLog state
assertEquals(Option(Truncating), thread.fetchState(t1p0).map(_.state))
assertEquals(Option(Truncating), thread.fetchState(t1p1).map(_.state))
//When
thread.doWork()
//Then none should be TruncatingLog anymore
assertEquals(Option(Fetching), thread.fetchState(t1p0).map(_.state))
assertEquals(Option(Fetching), thread.fetchState(t1p1).map(_.state))
verify(partition, times(2)).truncateTo(0L, false)
}
@Disabled("KAFKA-18370")
@Test
def shouldFilterPartitionsMadeLeaderDuringLeaderEpochRequest(): Unit = {
val config = kafkaConfigNoTruncateOnFetch
val truncateToCapture: ArgumentCaptor[Long] = ArgumentCaptor.forClass(classOf[Long])
val initialLEO = 100
//Setup all stubs
val quota: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager])
val logManager: LogManager = mock(classOf[LogManager])
val replicaAlterLogDirsManager: ReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager])
val log: UnifiedLog = mock(classOf[UnifiedLog])
val partition: Partition = mock(classOf[Partition])
val replicaManager: ReplicaManager = mock(classOf[ReplicaManager])
//Stub return values
when(partition.localLogOrException).thenReturn(log)
when(log.highWatermark).thenReturn(initialLEO - 2)
when(log.latestEpoch).thenReturn(Some(5))
when(log.endOffsetForEpoch(5)).thenReturn(Some(new OffsetAndEpoch(initialLEO, 5)))
when(log.logEndOffset).thenReturn(initialLEO)
when(replicaManager.metadataCache).thenReturn(metadataCache)
when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log)
when(replicaManager.logManager).thenReturn(logManager)
when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager)
stub(partition, replicaManager, log)
//Define the offsets for the OffsetsForLeaderEpochResponse
val offsetsReply = Map(
t1p0 -> newOffsetForLeaderPartitionResult(t1p0, 5, 52),
t1p1 -> newOffsetForLeaderPartitionResult(t1p1, 5, 49)
).asJava
//Create the fetcher thread
val mockNetwork = new MockBlockingSender(offsetsReply, brokerEndPoint, Time.SYSTEM)
val thread = createReplicaFetcherThread(
"bob",
0,
config,
failedPartitions,
replicaManager,
quota,
mockNetwork
)
//When
thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), 0L), t1p1 -> initialFetchState(Some(topicId1), 0L)))
//When the epoch request is outstanding, remove one of the partitions to simulate a leader change. We do this via a callback passed to the mock thread
val partitionThatBecameLeader = t1p0
mockNetwork.setEpochRequestCallback(() => {
thread.removePartitions(Set(partitionThatBecameLeader))
})
//When
thread.doWork()
//Then we should not have truncated the partition that became leader. Exactly one partition should be truncated.
verify(partition).truncateTo(truncateToCapture.capture(), anyBoolean())
assertEquals(49, truncateToCapture.getValue)
}
@Test @Test
def shouldCatchExceptionFromBlockingSendWhenShuttingDownReplicaFetcherThread(): Unit = { def shouldCatchExceptionFromBlockingSendWhenShuttingDownReplicaFetcherThread(): Unit = {
val props = TestUtils.createBrokerConfig(1) val props = TestUtils.createBrokerConfig(1)
@ -1448,10 +826,4 @@ class ReplicaFetcherThreadTest {
when(replicaManager.localLogOrException(t2p1)).thenReturn(log) when(replicaManager.localLogOrException(t2p1)).thenReturn(log)
when(replicaManager.getPartitionOrException(t2p1)).thenReturn(partition) when(replicaManager.getPartitionOrException(t2p1)).thenReturn(partition)
} }
private def kafkaConfigNoTruncateOnFetch: KafkaConfig = {
val props = TestUtils.createBrokerConfig(1)
props.setProperty(ReplicationConfigs.INTER_BROKER_PROTOCOL_VERSION_CONFIG, IBP_2_6_IV0.version)
KafkaConfig.fromProps(props)
}
} }
View File
@ -479,7 +479,7 @@ class RequestQuotaTest extends BaseRequestTest {
) )
case ApiKeys.WRITE_TXN_MARKERS => case ApiKeys.WRITE_TXN_MARKERS =>
new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), List.empty.asJava) new WriteTxnMarkersRequest.Builder(java.util.List.of[WriteTxnMarkersRequest.TxnMarkerEntry])
case ApiKeys.TXN_OFFSET_COMMIT => case ApiKeys.TXN_OFFSET_COMMIT =>
new TxnOffsetCommitRequest.Builder( new TxnOffsetCommitRequest.Builder(
@ -638,7 +638,7 @@ class RequestQuotaTest extends BaseRequestTest {
Topic.CLUSTER_METADATA_TOPIC_PARTITION)) Topic.CLUSTER_METADATA_TOPIC_PARTITION))
case ApiKeys.ALTER_PARTITION => case ApiKeys.ALTER_PARTITION =>
new AlterPartitionRequest.Builder(new AlterPartitionRequestData(), true) new AlterPartitionRequest.Builder(new AlterPartitionRequestData())
case ApiKeys.UPDATE_FEATURES => case ApiKeys.UPDATE_FEATURES =>
new UpdateFeaturesRequest.Builder(new UpdateFeaturesRequestData()) new UpdateFeaturesRequest.Builder(new UpdateFeaturesRequestData())
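The same simplification pattern appears in these builders: constructor parameters that only existed to select an older wire version are dropped. A hedged sketch of the simplified AlterPartition call (empty request data for brevity; the purpose of the removed boolean is inferred, not shown in this diff):

import org.apache.kafka.common.message.AlterPartitionRequestData;
import org.apache.kafka.common.requests.AlterPartitionRequest;

public class AlterPartitionBuilderSketch {
    public static void main(String[] args) {
        // The second constructor argument is gone; the builder now always
        // spans the full supported version range.
        AlterPartitionRequest request =
            new AlterPartitionRequest.Builder(new AlterPartitionRequestData()).build();
        System.out.println(request.version());
    }
}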
View File
@ -26,7 +26,6 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetFo
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.protocol.Errors._
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET
import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse}
@ -321,8 +320,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging {
.setLeaderEpoch(leaderEpoch)) .setLeaderEpoch(leaderEpoch))
} }
val request = OffsetsForLeaderEpochRequest.Builder.forFollower( val request = OffsetsForLeaderEpochRequest.Builder.forFollower(topics, 1)
ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion, topics, 1)
val response = sender.sendRequest(request) val response = sender.sendRequest(request)
response.responseBody.asInstanceOf[OffsetsForLeaderEpochResponse].data.topics.asScala.flatMap { topic => response.responseBody.asInstanceOf[OffsetsForLeaderEpochResponse].data.topics.asScala.flatMap { topic =>
topic.partitions.asScala.map { partition => topic.partitions.asScala.map { partition =>
View File
@ -56,7 +56,6 @@ import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression; import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression;
import org.apache.kafka.coordinator.group.modern.share.ShareGroupMember; import org.apache.kafka.coordinator.group.modern.share.ShareGroupMember;
import org.apache.kafka.server.common.ApiMessageAndVersion; import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.server.common.MetadataVersion;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
@ -69,6 +68,9 @@ import java.util.Set;
* the __consumer_offsets topic. * the __consumer_offsets topic.
*/ */
public class GroupCoordinatorRecordHelpers { public class GroupCoordinatorRecordHelpers {
private static final short GROUP_METADATA_VALUE_VERSION = 3;
private GroupCoordinatorRecordHelpers() {} private GroupCoordinatorRecordHelpers() {}
/** /**
@ -442,13 +444,11 @@ public class GroupCoordinatorRecordHelpers {
* *
* @param group The classic group. * @param group The classic group.
* @param assignment The classic group assignment. * @param assignment The classic group assignment.
* @param metadataVersion The metadata version.
* @return The record. * @return The record.
*/ */
public static CoordinatorRecord newGroupMetadataRecord( public static CoordinatorRecord newGroupMetadataRecord(
ClassicGroup group, ClassicGroup group,
Map<String, byte[]> assignment, Map<String, byte[]> assignment
MetadataVersion metadataVersion
) { ) {
List<GroupMetadataValue.MemberMetadata> members = new ArrayList<>(group.allMembers().size()); List<GroupMetadataValue.MemberMetadata> members = new ArrayList<>(group.allMembers().size());
group.allMembers().forEach(member -> { group.allMembers().forEach(member -> {
@ -490,7 +490,7 @@ public class GroupCoordinatorRecordHelpers {
.setLeader(group.leaderOrNull()) .setLeader(group.leaderOrNull())
.setCurrentStateTimestamp(group.currentStateTimestampOrDefault()) .setCurrentStateTimestamp(group.currentStateTimestampOrDefault())
.setMembers(members), .setMembers(members),
metadataVersion.groupMetadataValueVersion() GROUP_METADATA_VALUE_VERSION
) )
); );
} }
@ -518,12 +518,10 @@ public class GroupCoordinatorRecordHelpers {
* Creates an empty GroupMetadata record. * Creates an empty GroupMetadata record.
* *
* @param group The classic group. * @param group The classic group.
* @param metadataVersion The metadata version.
* @return The record. * @return The record.
*/ */
public static CoordinatorRecord newEmptyGroupMetadataRecord( public static CoordinatorRecord newEmptyGroupMetadataRecord(
ClassicGroup group, ClassicGroup group
MetadataVersion metadataVersion
) { ) {
return new CoordinatorRecord( return new CoordinatorRecord(
new ApiMessageAndVersion( new ApiMessageAndVersion(
@ -539,7 +537,7 @@ public class GroupCoordinatorRecordHelpers {
.setLeader(null) .setLeader(null)
.setCurrentStateTimestamp(group.currentStateTimestampOrDefault()) .setCurrentStateTimestamp(group.currentStateTimestampOrDefault())
.setMembers(Collections.emptyList()), .setMembers(Collections.emptyList()),
metadataVersion.groupMetadataValueVersion() GROUP_METADATA_VALUE_VERSION
) )
); );
} }
@ -551,17 +549,15 @@ public class GroupCoordinatorRecordHelpers {
* @param topic The topic name. * @param topic The topic name.
* @param partitionId The partition id. * @param partitionId The partition id.
* @param offsetAndMetadata The offset and metadata. * @param offsetAndMetadata The offset and metadata.
* @param metadataVersion The metadata version.
* @return The record. * @return The record.
*/ */
public static CoordinatorRecord newOffsetCommitRecord( public static CoordinatorRecord newOffsetCommitRecord(
String groupId, String groupId,
String topic, String topic,
int partitionId, int partitionId,
OffsetAndMetadata offsetAndMetadata, OffsetAndMetadata offsetAndMetadata
MetadataVersion metadataVersion
) { ) {
short version = metadataVersion.offsetCommitValueVersion(offsetAndMetadata.expireTimestampMs.isPresent()); short version = offsetCommitValueVersion(offsetAndMetadata.expireTimestampMs.isPresent());
return new CoordinatorRecord( return new CoordinatorRecord(
new ApiMessageAndVersion( new ApiMessageAndVersion(
@ -584,6 +580,16 @@ public class GroupCoordinatorRecordHelpers {
); );
} }
static short offsetCommitValueVersion(boolean expireTimestampMs) {
if (expireTimestampMs) {
return 1;
} else {
// Serialize with the highest supported non-flexible version
// until a tagged field is introduced or the version is bumped.
return 3;
}
}
/** /**
* Creates an OffsetCommit tombstone record. * Creates an OffsetCommit tombstone record.
* *
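With 3.0-IV1 as the floor, the group coordinator no longer consults MetadataVersion when serializing its own records; the value versions become pinned constants. A self-contained restatement of the choices above (the class name is illustrative; the constants mirror the diff):

public class PinnedRecordVersionsSketch {
    // GroupMetadataValue is now always written at version 3.
    static final short GROUP_METADATA_VALUE_VERSION = 3;

    static short offsetCommitValueVersion(boolean hasExpireTimestamp) {
        // Value version 1 is the only one carrying the expire timestamp;
        // otherwise use the highest non-flexible version, 3.
        return hasExpireTimestamp ? (short) 1 : (short) 3;
    }

    public static void main(String[] args) {
        System.out.println(offsetCommitValueVersion(true));   // 1
        System.out.println(offsetCommitValueVersion(false));  // 3
        System.out.println(GROUP_METADATA_VALUE_VERSION);     // 3
    }
}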
View File
@ -727,7 +727,6 @@ public class GroupCoordinatorShard implements CoordinatorShard<CoordinatorRecord
public void onLoaded(MetadataImage newImage) { public void onLoaded(MetadataImage newImage) {
MetadataDelta emptyDelta = new MetadataDelta(newImage); MetadataDelta emptyDelta = new MetadataDelta(newImage);
groupMetadataManager.onNewMetadataImage(newImage, emptyDelta); groupMetadataManager.onNewMetadataImage(newImage, emptyDelta);
offsetMetadataManager.onNewMetadataImage(newImage, emptyDelta);
coordinatorMetrics.activateMetricsShard(metricsShard); coordinatorMetrics.activateMetricsShard(metricsShard);
groupMetadataManager.onLoaded(); groupMetadataManager.onLoaded();
@ -752,7 +751,6 @@ public class GroupCoordinatorShard implements CoordinatorShard<CoordinatorRecord
@Override @Override
public void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) { public void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) {
groupMetadataManager.onNewMetadataImage(newImage, delta); groupMetadataManager.onNewMetadataImage(newImage, delta);
offsetMetadataManager.onNewMetadataImage(newImage, delta);
} }
/** /**
View File
@ -1017,7 +1017,7 @@ public class GroupMetadataManager {
throw new GroupIdNotFoundException(String.format("Cannot downgrade the classic group %s: %s.", throw new GroupIdNotFoundException(String.format("Cannot downgrade the classic group %s: %s.",
consumerGroup.groupId(), e.getMessage())); consumerGroup.groupId(), e.getMessage()));
} }
classicGroup.createClassicGroupRecords(metadataImage.features().metadataVersion(), records); classicGroup.createClassicGroupRecords(records);
// Directly update the states instead of replaying the records because // Directly update the states instead of replaying the records because
// the classicGroup reference is needed for triggering the rebalance. // the classicGroup reference is needed for triggering the rebalance.
@ -4411,7 +4411,7 @@ public class GroupMetadataManager {
}); });
records.add( records.add(
GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group, metadataImage.features().metadataVersion()) GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group)
); );
return new CoordinatorResult<>(records, appendFuture, false); return new CoordinatorResult<>(records, appendFuture, false);
@ -4841,7 +4841,7 @@ public class GroupMetadataManager {
}); });
List<CoordinatorRecord> records = Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord( List<CoordinatorRecord> records = Collections.singletonList(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(
group, Collections.emptyMap(), metadataImage.features().metadataVersion())); group, Collections.emptyMap()));
return new CoordinatorResult<>(records, appendFuture, false); return new CoordinatorResult<>(records, appendFuture, false);
@ -5503,7 +5503,7 @@ public class GroupMetadataManager {
}); });
List<CoordinatorRecord> records = Collections.singletonList( List<CoordinatorRecord> records = Collections.singletonList(
GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment, metadataImage.features().metadataVersion()) GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment)
); );
return new CoordinatorResult<>(records, appendFuture, false); return new CoordinatorResult<>(records, appendFuture, false);
@ -5650,7 +5650,7 @@ public class GroupMetadataManager {
}); });
List<CoordinatorRecord> records = Collections.singletonList( List<CoordinatorRecord> records = Collections.singletonList(
GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignment, metadataImage.features().metadataVersion()) GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignment)
); );
return new CoordinatorResult<>(records, appendFuture, false); return new CoordinatorResult<>(records, appendFuture, false);
} }
View File
@ -47,7 +47,6 @@ import org.apache.kafka.coordinator.group.generated.OffsetCommitKey;
import org.apache.kafka.coordinator.group.generated.OffsetCommitValue; import org.apache.kafka.coordinator.group.generated.OffsetCommitValue;
import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics; import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetrics;
import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard; import org.apache.kafka.coordinator.group.metrics.GroupCoordinatorMetricsShard;
import org.apache.kafka.image.MetadataDelta;
import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataImage;
import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.SnapshotRegistry;
import org.apache.kafka.timeline.TimelineHashMap; import org.apache.kafka.timeline.TimelineHashMap;
@ -168,11 +167,6 @@ public class OffsetMetadataManager {
*/ */
private final Time time; private final Time time;
/**
* The metadata image.
*/
private MetadataImage metadataImage;
/** /**
* The group metadata manager. * The group metadata manager.
*/ */
@ -284,7 +278,6 @@ public class OffsetMetadataManager {
this.snapshotRegistry = snapshotRegistry; this.snapshotRegistry = snapshotRegistry;
this.log = logContext.logger(OffsetMetadataManager.class); this.log = logContext.logger(OffsetMetadataManager.class);
this.time = time; this.time = time;
this.metadataImage = metadataImage;
this.groupMetadataManager = groupMetadataManager; this.groupMetadataManager = groupMetadataManager;
this.config = config; this.config = config;
this.metrics = metrics; this.metrics = metrics;
@ -498,8 +491,7 @@ public class OffsetMetadataManager {
request.groupId(), request.groupId(),
topic.name(), topic.name(),
partition.partitionIndex(), partition.partitionIndex(),
offsetAndMetadata, offsetAndMetadata
metadataImage.features().metadataVersion()
)); ));
} }
}); });
@ -558,8 +550,7 @@ public class OffsetMetadataManager {
request.groupId(), request.groupId(),
topic.name(), topic.name(),
partition.partitionIndex(), partition.partitionIndex(),
offsetAndMetadata, offsetAndMetadata
metadataImage.features().metadataVersion()
)); ));
} }
}); });
@ -1111,16 +1102,6 @@ public class OffsetMetadataManager {
} }
} }
/**
* A new metadata image is available.
*
* @param newImage The new metadata image.
* @param delta The delta image.
*/
public void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) {
metadataImage = newImage;
}
/** /**
* @return The offset for the provided groupId and topic partition or null * @return The offset for the provided groupId and topic partition or null
* if it does not exist. * if it does not exist.
View File
@ -41,7 +41,6 @@ import org.apache.kafka.coordinator.group.OffsetExpirationConditionImpl;
import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup;
import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataImage;
import org.apache.kafka.server.common.MetadataVersion;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -1433,11 +1432,9 @@ public class ClassicGroup implements Group {
/** /**
* Populate the record list with the records needed to create the given classic group. * Populate the record list with the records needed to create the given classic group.
* *
* @param metadataVersion The MetadataVersion.
* @param records The list to which the new records are added. * @param records The list to which the new records are added.
*/ */
public void createClassicGroupRecords( public void createClassicGroupRecords(
MetadataVersion metadataVersion,
List<CoordinatorRecord> records List<CoordinatorRecord> records
) { ) {
Map<String, byte[]> assignments = new HashMap<>(); Map<String, byte[]> assignments = new HashMap<>();
@ -1445,7 +1442,7 @@ public class ClassicGroup implements Group {
assignments.put(classicGroupMember.memberId(), classicGroupMember.assignment()) assignments.put(classicGroupMember.memberId(), classicGroupMember.assignment())
); );
records.add(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(this, assignments, metadataVersion)); records.add(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(this, assignments));
} }
/** /**
View File
@ -50,13 +50,8 @@ import org.apache.kafka.coordinator.group.modern.TopicMetadata;
import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression; import org.apache.kafka.coordinator.group.modern.consumer.ResolvedRegularExpression;
import org.apache.kafka.server.common.ApiMessageAndVersion; import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.server.common.MetadataVersion;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.EnumSource;
import org.junit.jupiter.params.provider.MethodSource;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -70,7 +65,6 @@ import java.util.Optional;
import java.util.OptionalInt; import java.util.OptionalInt;
import java.util.OptionalLong; import java.util.OptionalLong;
import java.util.Set; import java.util.Set;
import java.util.stream.Stream;
import static org.apache.kafka.coordinator.group.Assertions.assertRecordEquals; import static org.apache.kafka.coordinator.group.Assertions.assertRecordEquals;
import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkOrderedAssignment; import static org.apache.kafka.coordinator.group.AssignmentTestUtil.mkOrderedAssignment;
@ -454,21 +448,8 @@ public class GroupCoordinatorRecordHelpersTest {
)); ));
} }
private static Stream<Arguments> metadataToExpectedGroupMetadataValue() { @Test
return Stream.of( public void testNewGroupMetadataRecord() {
Arguments.arguments(MetadataVersion.IBP_0_10_0_IV0, (short) 0),
Arguments.arguments(MetadataVersion.IBP_1_1_IV0, (short) 1),
Arguments.arguments(MetadataVersion.IBP_2_2_IV0, (short) 2),
Arguments.arguments(MetadataVersion.IBP_3_5_IV0, (short) 3)
);
}
@ParameterizedTest
@MethodSource("metadataToExpectedGroupMetadataValue")
public void testNewGroupMetadataRecord(
MetadataVersion metadataVersion,
short expectedGroupMetadataValueVersion
) {
Time time = new MockTime(); Time time = new MockTime();
List<GroupMetadataValue.MemberMetadata> expectedMembers = new ArrayList<>(); List<GroupMetadataValue.MemberMetadata> expectedMembers = new ArrayList<>();
@ -509,7 +490,7 @@ public class GroupCoordinatorRecordHelpersTest {
.setGeneration(1) .setGeneration(1)
.setCurrentStateTimestamp(time.milliseconds()) .setCurrentStateTimestamp(time.milliseconds())
.setMembers(expectedMembers), .setMembers(expectedMembers),
expectedGroupMetadataValueVersion)); (short) 3));
ClassicGroup group = new ClassicGroup( ClassicGroup group = new ClassicGroup(
new LogContext(), new LogContext(),
@ -544,8 +525,7 @@ public class GroupCoordinatorRecordHelpersTest {
group.initNextGeneration(); group.initNextGeneration();
CoordinatorRecord groupMetadataRecord = GroupCoordinatorRecordHelpers.newGroupMetadataRecord( CoordinatorRecord groupMetadataRecord = GroupCoordinatorRecordHelpers.newGroupMetadataRecord(
group, group,
assignment, assignment
metadataVersion
); );
assertEquals(expectedRecord, groupMetadataRecord); assertEquals(expectedRecord, groupMetadataRecord);
@ -610,8 +590,7 @@ public class GroupCoordinatorRecordHelpersTest {
assertThrows(IllegalStateException.class, () -> assertThrows(IllegalStateException.class, () ->
GroupCoordinatorRecordHelpers.newGroupMetadataRecord( GroupCoordinatorRecordHelpers.newGroupMetadataRecord(
group, group,
Collections.emptyMap(), Collections.emptyMap()
MetadataVersion.IBP_3_5_IV2
)); ));
} }
@ -661,17 +640,12 @@ public class GroupCoordinatorRecordHelpersTest {
assertThrows(IllegalStateException.class, () -> assertThrows(IllegalStateException.class, () ->
GroupCoordinatorRecordHelpers.newGroupMetadataRecord( GroupCoordinatorRecordHelpers.newGroupMetadataRecord(
group, group,
Collections.emptyMap(), Collections.emptyMap()
MetadataVersion.IBP_3_5_IV2
)); ));
} }
@ParameterizedTest @Test
@MethodSource("metadataToExpectedGroupMetadataValue") public void testEmptyGroupMetadataRecord() {
public void testEmptyGroupMetadataRecord(
MetadataVersion metadataVersion,
short expectedGroupMetadataValueVersion
) {
Time time = new MockTime(); Time time = new MockTime();
List<GroupMetadataValue.MemberMetadata> expectedMembers = Collections.emptyList(); List<GroupMetadataValue.MemberMetadata> expectedMembers = Collections.emptyList();
@ -689,7 +663,7 @@ public class GroupCoordinatorRecordHelpersTest {
.setGeneration(0) .setGeneration(0)
.setCurrentStateTimestamp(time.milliseconds()) .setCurrentStateTimestamp(time.milliseconds())
.setMembers(expectedMembers), .setMembers(expectedMembers),
expectedGroupMetadataValueVersion)); (short) 3));
ClassicGroup group = new ClassicGroup( ClassicGroup group = new ClassicGroup(
new LogContext(), new LogContext(),
@ -700,16 +674,20 @@ public class GroupCoordinatorRecordHelpersTest {
group.initNextGeneration(); group.initNextGeneration();
CoordinatorRecord groupMetadataRecord = GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord( CoordinatorRecord groupMetadataRecord = GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(
group, group
metadataVersion
); );
assertEquals(expectedRecord, groupMetadataRecord); assertEquals(expectedRecord, groupMetadataRecord);
} }
@ParameterizedTest @Test
@EnumSource(value = MetadataVersion.class) public void testOffsetCommitValueVersion() {
public void testNewOffsetCommitRecord(MetadataVersion metadataVersion) { assertEquals((short) 1, GroupCoordinatorRecordHelpers.offsetCommitValueVersion(true));
assertEquals((short) 3, GroupCoordinatorRecordHelpers.offsetCommitValueVersion(false));
}
@Test
public void testNewOffsetCommitRecord() {
OffsetCommitKey key = new OffsetCommitKey() OffsetCommitKey key = new OffsetCommitKey()
.setGroup("group-id") .setGroup("group-id")
.setTopic("foo") .setTopic("foo")
@ -727,8 +705,7 @@ public class GroupCoordinatorRecordHelpersTest {
(short) 1), (short) 1),
new ApiMessageAndVersion( new ApiMessageAndVersion(
value, value,
metadataVersion.offsetCommitValueVersion(false) GroupCoordinatorRecordHelpers.offsetCommitValueVersion(false))
)
); );
assertEquals(expectedRecord, GroupCoordinatorRecordHelpers.newOffsetCommitRecord( assertEquals(expectedRecord, GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
@ -740,8 +717,7 @@ public class GroupCoordinatorRecordHelpersTest {
OptionalInt.of(10), OptionalInt.of(10),
"metadata", "metadata",
1234L, 1234L,
OptionalLong.empty()), OptionalLong.empty())
metadataVersion
)); ));
value.setLeaderEpoch(-1); value.setLeaderEpoch(-1);
@ -755,14 +731,12 @@ public class GroupCoordinatorRecordHelpersTest {
OptionalInt.empty(), OptionalInt.empty(),
"metadata", "metadata",
1234L, 1234L,
OptionalLong.empty()), OptionalLong.empty())
metadataVersion
)); ));
} }
@ParameterizedTest @Test
@EnumSource(value = MetadataVersion.class) public void testNewOffsetCommitRecordWithExpireTimestamp() {
public void testNewOffsetCommitRecordWithExpireTimestamp(MetadataVersion metadataVersion) {
CoordinatorRecord expectedRecord = new CoordinatorRecord( CoordinatorRecord expectedRecord = new CoordinatorRecord(
new ApiMessageAndVersion( new ApiMessageAndVersion(
new OffsetCommitKey() new OffsetCommitKey()
@ -790,8 +764,7 @@ public class GroupCoordinatorRecordHelpersTest {
OptionalInt.of(10), OptionalInt.of(10),
"metadata", "metadata",
1234L, 1234L,
OptionalLong.of(5678L)), OptionalLong.of(5678L))
metadataVersion
)); ));
} }
View File
@ -94,7 +94,6 @@ import org.apache.kafka.coordinator.group.modern.share.ShareGroupMember;
import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataDelta;
import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataImage;
import org.apache.kafka.image.MetadataProvenance; import org.apache.kafka.image.MetadataProvenance;
import org.apache.kafka.server.common.MetadataVersion;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
@ -3646,7 +3645,7 @@ public class GroupMetadataManagerTest {
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group, MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newEmptyGroupMetadataRecord(group)),
joinResult.records joinResult.records
); );
} }
@ -3719,8 +3718,7 @@ public class GroupMetadataManagerTest {
.setLeader("member-0") .setLeader("member-0")
.setProtocolType("consumer") .setProtocolType("consumer")
.setProtocol("range") .setProtocol("range")
.setCurrentStateTimestamp(context.time.milliseconds()), .setCurrentStateTimestamp(context.time.milliseconds()));
MetadataVersion.latestTesting());
context.replay(groupMetadataRecord); context.replay(groupMetadataRecord);
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
@ -3777,8 +3775,7 @@ public class GroupMetadataManagerTest {
.setLeader("member-0") .setLeader("member-0")
.setProtocolType("consumer") .setProtocolType("consumer")
.setProtocol("range") .setProtocol("range")
.setCurrentStateTimestamp(context.time.milliseconds()), .setCurrentStateTimestamp(context.time.milliseconds()));
MetadataVersion.latestTesting());
context.replay(groupMetadataRecord); context.replay(groupMetadataRecord);
context.groupMetadataManager.onLoaded(); context.groupMetadataManager.onLoaded();
@ -3817,8 +3814,7 @@ public class GroupMetadataManagerTest {
.setLeader("member-0") .setLeader("member-0")
.setProtocolType("consumer") .setProtocolType("consumer")
.setProtocol("range") .setProtocol("range")
.setCurrentStateTimestamp(context.time.milliseconds()), .setCurrentStateTimestamp(context.time.milliseconds()));
MetadataVersion.latestTesting());
context.replay(groupMetadataRecord); context.replay(groupMetadataRecord);
context.groupMetadataManager.onLoaded(); context.groupMetadataManager.onLoaded();
@ -4716,7 +4712,7 @@ public class GroupMetadataManagerTest {
timeouts.forEach(timeout -> { timeouts.forEach(timeout -> {
assertEquals(classicGroupHeartbeatKey("group-id", memberId), timeout.key); assertEquals(classicGroupHeartbeatKey("group-id", memberId), timeout.key);
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
timeout.result.records() timeout.result.records()
); );
}); });
@ -5196,7 +5192,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
joinResult.records joinResult.records
); );
assertFalse(joinResult.joinFuture.isDone()); assertFalse(joinResult.joinFuture.isDone());
@ -5323,7 +5319,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
joinResult.records joinResult.records
); );
assertFalse(joinResult.joinFuture.isDone()); assertFalse(joinResult.joinFuture.isDone());
@ -5399,7 +5395,7 @@ public class GroupMetadataManagerTest {
supportSkippingAssignment); supportSkippingAssignment);
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
joinResult.records joinResult.records
); );
assertFalse(joinResult.joinFuture.isDone()); assertFalse(joinResult.joinFuture.isDone());
@ -5536,8 +5532,7 @@ public class GroupMetadataManagerTest {
.setLeader(null) .setLeader(null)
.setProtocolType("consumer") .setProtocolType("consumer")
.setProtocol(null) .setProtocol(null)
.setCurrentStateTimestamp(context.time.milliseconds()), .setCurrentStateTimestamp(context.time.milliseconds()))
MetadataVersion.latestTesting())
); );
assertEquals(1, timeouts.size()); assertEquals(1, timeouts.size());
@ -5967,7 +5962,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
joinResult.records joinResult.records
); );
// Simulate a successful write to the log. // Simulate a successful write to the log.
@ -6280,7 +6275,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
followerJoinResult.records followerJoinResult.records
); );
// Simulate a failed write to the log. // Simulate a failed write to the log.
@ -6337,7 +6332,7 @@ public class GroupMetadataManagerTest {
leaderSyncResult.appendFuture.complete(null); leaderSyncResult.appendFuture.complete(null);
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
leaderSyncResult.records leaderSyncResult.records
); );
@ -6387,7 +6382,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
followerJoinResult.records followerJoinResult.records
); );
@ -6599,7 +6594,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
followerJoinResult.records followerJoinResult.records
); );
// Simulate a successful write to log. // Simulate a successful write to log.
@ -6806,7 +6801,7 @@ public class GroupMetadataManagerTest {
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
leaderJoinResult.records leaderJoinResult.records
); );
// Simulate a successful write to log. // Simulate a successful write to log.
@ -7587,7 +7582,7 @@ public class GroupMetadataManagerTest {
assertEquals( assertEquals(
List.of( List.of(
GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, updatedAssignment, MetadataVersion.latestTesting())), GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, updatedAssignment)),
syncResult.records syncResult.records
); );
@ -8246,7 +8241,7 @@ public class GroupMetadataManagerTest {
ExpiredTimeout<Void, CoordinatorRecord> timeout = timeouts.get(0); ExpiredTimeout<Void, CoordinatorRecord> timeout = timeouts.get(0);
assertEquals(classicGroupSyncKey("group-id"), timeout.key); assertEquals(classicGroupSyncKey("group-id"), timeout.key);
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
timeout.result.records() timeout.result.records()
); );
@ -8402,7 +8397,7 @@ public class GroupMetadataManagerTest {
if (response.memberId().equals(leaderId)) { if (response.memberId().equals(leaderId)) {
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
syncResult.records syncResult.records
); );
@ -8497,8 +8492,7 @@ public class GroupMetadataManagerTest {
.setLeader(null) .setLeader(null)
.setProtocolType("classic") .setProtocolType("classic")
.setProtocol("range") .setProtocol("range")
.setCurrentStateTimestamp(context.time.milliseconds()), .setCurrentStateTimestamp(context.time.milliseconds())));
MetadataVersion.latestTesting()));
// Create one share group record. // Create one share group record.
context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(shareGroupId, 6)); context.replay(GroupCoordinatorRecordHelpers.newShareGroupEpochRecord(shareGroupId, 6));
context.commit(); context.commit();
@ -8792,8 +8786,7 @@ public class GroupMetadataManagerTest {
context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord( context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord(
"group-id", "group-id",
groupMetadataValue, groupMetadataValue
MetadataVersion.latestTesting()
)); ));
context.verifyDescribeGroupsReturnsDeadGroup("group-id"); context.verifyDescribeGroupsReturnsDeadGroup("group-id");
context.commit(); context.commit();
@ -8841,8 +8834,7 @@ public class GroupMetadataManagerTest {
context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord( context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord(
"group-id", "group-id",
groupMetadataValue, groupMetadataValue
MetadataVersion.latestTesting()
)); ));
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false); ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
context.groupMetadataManager.prepareRebalance(group, "trigger rebalance"); context.groupMetadataManager.prepareRebalance(group, "trigger rebalance");
@ -9101,7 +9093,7 @@ public class GroupMetadataManagerTest {
)) ))
); );
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
leaveResult.records() leaveResult.records()
); );
// Simulate a successful write to the log. // Simulate a successful write to the log.
@ -9571,7 +9563,7 @@ public class GroupMetadataManagerTest {
// Even if there are more group metadata records loaded than tombstone records, the last replayed record // Even if there are more group metadata records loaded than tombstone records, the last replayed record
// (tombstone in this test) is the latest state of the group. Hence, the overall metric count should be 0. // (tombstone in this test) is the latest state of the group. Hence, the overall metric count should be 0.
IntStream.range(0, 5).forEach(__ -> IntStream.range(0, 5).forEach(__ ->
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, Collections.emptyMap(), MetadataVersion.LATEST_PRODUCTION)) context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, Collections.emptyMap()))
); );
IntStream.range(0, 4).forEach(__ -> IntStream.range(0, 4).forEach(__ ->
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id")) context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id"))
@ -9638,7 +9630,7 @@ public class GroupMetadataManagerTest {
EMPTY, EMPTY,
context.time context.time
); );
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment(), MetadataVersion.latestTesting())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment()));
context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false).transitionTo(PREPARING_REBALANCE); context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false).transitionTo(PREPARING_REBALANCE);
assertThrows(GroupIdNotFoundException.class, () -> assertThrows(GroupIdNotFoundException.class, () ->
@ -9666,7 +9658,7 @@ public class GroupMetadataManagerTest {
EMPTY, EMPTY,
context.time context.time
); );
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment(), MetadataVersion.latestTesting())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(classicGroup, classicGroup.groupAssignment()));
CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat( CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData() new ConsumerGroupHeartbeatRequestData()
@ -9810,7 +9802,7 @@ public class GroupMetadataManagerTest {
group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(COMPLETING_REBALANCE);
group.transitionTo(STABLE); group.transitionTo(STABLE);
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments));
context.commit(); context.commit();
group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
@ -9996,7 +9988,7 @@ public class GroupMetadataManagerTest {
group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(COMPLETING_REBALANCE);
group.transitionTo(STABLE); group.transitionTo(STABLE);
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments));
context.commit(); context.commit();
group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
@ -10201,7 +10193,7 @@ public class GroupMetadataManagerTest {
group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(COMPLETING_REBALANCE);
group.transitionTo(STABLE); group.transitionTo(STABLE);
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments));
context.commit(); context.commit();
group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
@ -10282,7 +10274,7 @@ public class GroupMetadataManagerTest {
group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(COMPLETING_REBALANCE);
group.transitionTo(STABLE); group.transitionTo(STABLE);
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments));
context.commit(); context.commit();
// The static member rejoins with new protocol after a restart, triggering the upgrade. // The static member rejoins with new protocol after a restart, triggering the upgrade.
@ -10765,7 +10757,7 @@ public class GroupMetadataManagerTest {
group.transitionTo(COMPLETING_REBALANCE); group.transitionTo(COMPLETING_REBALANCE);
group.transitionTo(STABLE); group.transitionTo(STABLE);
context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion())); context.replay(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments));
context.commit(); context.commit();
group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false); group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
@ -11156,7 +11148,7 @@ public class GroupMetadataManagerTest {
), ),
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)),
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)),
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting())) List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments))
), ),
result.records() result.records()
); );
@ -11343,7 +11335,7 @@ public class GroupMetadataManagerTest {
), ),
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)),
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)),
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting())) List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments))
), ),
timeout.result.records() timeout.result.records()
); );
@ -11548,7 +11540,7 @@ public class GroupMetadataManagerTest {
), ),
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataTombstoneRecord(groupId)),
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)),
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting())) List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments))
), ),
timeout.result.records() timeout.result.records()
); );
@ -11783,7 +11775,7 @@ public class GroupMetadataManagerTest {
List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)), List.of(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord(groupId)),
// Create the classic group. // Create the classic group.
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting())) List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments))
), ),
result.records result.records
); );

View File

@ -94,7 +94,6 @@ import org.apache.kafka.coordinator.group.modern.share.ShareGroup;
import org.apache.kafka.coordinator.group.modern.share.ShareGroupBuilder; import org.apache.kafka.coordinator.group.modern.share.ShareGroupBuilder;
import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataImage;
import org.apache.kafka.server.common.ApiMessageAndVersion; import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.SnapshotRegistry;
import java.net.InetAddress; import java.net.InetAddress;
@ -226,8 +225,7 @@ public class GroupMetadataManagerTestContext {
public static CoordinatorRecord newGroupMetadataRecord( public static CoordinatorRecord newGroupMetadataRecord(
String groupId, String groupId,
GroupMetadataValue value, GroupMetadataValue value
MetadataVersion metadataVersion
) { ) {
return new CoordinatorRecord( return new CoordinatorRecord(
new ApiMessageAndVersion( new ApiMessageAndVersion(
@ -237,7 +235,7 @@ public class GroupMetadataManagerTestContext {
), ),
new ApiMessageAndVersion( new ApiMessageAndVersion(
value, value,
metadataVersion.groupMetadataValueVersion() (short) 3
) )
); );
} }
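
The hard-coded (short) 3 above is why the MetadataVersion parameter can disappear from every call site in this file: with all pre-3.0-IV1 versions removed, the old groupMetadataValueVersion() branches always landed on 3. A rough sketch of the corresponding production helper under that assumption (the value-building step is elided; the shape mirrors this test helper and is not verbatim):

public static CoordinatorRecord newGroupMetadataRecord(
    ClassicGroup group,
    Map<String, byte[]> assignment
) {
    GroupMetadataValue value = new GroupMetadataValue(); // fields populated from group and assignment (elided)
    return new CoordinatorRecord(
        new ApiMessageAndVersion(
            new GroupMetadataKey().setGroup(group.groupId()),
            (short) 2
        ),
        new ApiMessageAndVersion(
            value,
            (short) 3 // previously metadataVersion.groupMetadataValueVersion()
        )
    );
}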
@ -849,7 +847,7 @@ public class GroupMetadataManagerTestContext {
.build()); .build());
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
syncResult.records syncResult.records
); );
// Simulate a successful write to the log. // Simulate a successful write to the log.
@ -1057,7 +1055,7 @@ public class GroupMetadataManagerTestContext {
)); ));
assertEquals( assertEquals(
List.of( List.of(
GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment, MetadataVersion.latestTesting())), GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, groupAssignment)),
leaderSyncResult.records leaderSyncResult.records
); );
@ -1117,7 +1115,7 @@ public class GroupMetadataManagerTestContext {
// Now the group is stable, with the one member that joined above // Now the group is stable, with the one member that joined above
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
syncResult.records syncResult.records
); );
// Simulate a successful write to log. // Simulate a successful write to log.
@ -1155,7 +1153,7 @@ public class GroupMetadataManagerTestContext {
syncResult = sendClassicGroupSync(syncRequest.setGenerationId(nextGenerationId)); syncResult = sendClassicGroupSync(syncRequest.setGenerationId(nextGenerationId));
assertEquals( assertEquals(
List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment(), MetadataVersion.latestTesting())), List.of(GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, group.groupAssignment())),
syncResult.records syncResult.records
); );
// Simulate a successful write to log. // Simulate a successful write to log.
@ -1218,8 +1216,7 @@ public class GroupMetadataManagerTestContext {
.setLeader(null) .setLeader(null)
.setProtocolType("consumer") .setProtocolType("consumer")
.setProtocol(null) .setProtocol(null)
.setCurrentStateTimestamp(time.milliseconds()), .setCurrentStateTimestamp(time.milliseconds())
MetadataVersion.latestTesting()
)); ));

View File

@ -62,7 +62,6 @@ import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup;
import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember; import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroupMember;
import org.apache.kafka.image.MetadataImage; import org.apache.kafka.image.MetadataImage;
import org.apache.kafka.server.common.ApiMessageAndVersion; import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.timeline.SnapshotRegistry; import org.apache.kafka.timeline.SnapshotRegistry;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
@ -449,8 +448,7 @@ public class OffsetMetadataManagerTest {
"metadata", "metadata",
commitTimestamp, commitTimestamp,
OptionalLong.empty() OptionalLong.empty()
), )
MetadataVersion.latestTesting()
)); ));
} }
@ -907,8 +905,7 @@ public class OffsetMetadataManagerTest {
"", "",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.of(context.time.milliseconds() + 1234L) OptionalLong.of(context.time.milliseconds() + 1234L)
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1015,8 +1012,7 @@ public class OffsetMetadataManagerTest {
"", "",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1075,8 +1071,7 @@ public class OffsetMetadataManagerTest {
"", "",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1239,8 +1234,7 @@ public class OffsetMetadataManagerTest {
"", "",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1306,8 +1300,7 @@ public class OffsetMetadataManagerTest {
"metadata", "metadata",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1383,8 +1376,7 @@ public class OffsetMetadataManagerTest {
"small", "small",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1450,8 +1442,7 @@ public class OffsetMetadataManagerTest {
"metadata", "metadata",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -1608,8 +1599,7 @@ public class OffsetMetadataManagerTest {
"metadata", "metadata",
context.time.milliseconds(), context.time.milliseconds(),
OptionalLong.empty() OptionalLong.empty()
), )
MetadataImage.EMPTY.features().metadataVersion()
)), )),
result.records() result.records()
); );
@ -3136,8 +3126,7 @@ public class OffsetMetadataManagerTest {
groupId, groupId,
topic, topic,
partition, partition,
offsetAndMetadata, offsetAndMetadata
MetadataImage.EMPTY.features().metadataVersion()
)); ));
assertEquals(offsetAndMetadata, context.offsetMetadataManager.offset( assertEquals(offsetAndMetadata, context.offsetMetadataManager.offset(
@ -3159,8 +3148,7 @@ public class OffsetMetadataManagerTest {
groupId, groupId,
topic, topic,
partition, partition,
offsetAndMetadata, offsetAndMetadata
MetadataImage.EMPTY.features().metadataVersion()
)); ));
assertEquals(offsetAndMetadata, context.offsetMetadataManager.pendingTransactionalOffset( assertEquals(offsetAndMetadata, context.offsetMetadataManager.pendingTransactionalOffset(

View File

@ -29,7 +29,6 @@ import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.Records; import org.apache.kafka.common.record.Records;
import org.apache.kafka.common.utils.Exit; import org.apache.kafka.common.utils.Exit;
import org.apache.kafka.common.utils.Utils; import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.server.common.RequestLocal; import org.apache.kafka.server.common.RequestLocal;
import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchIsolation;
import org.apache.kafka.server.util.MockTime; import org.apache.kafka.server.util.MockTime;
@ -166,7 +165,6 @@ public class StressTestLog {
LogAppendInfo logAppendInfo = log.appendAsLeader(records, LogAppendInfo logAppendInfo = log.appendAsLeader(records,
0, 0,
AppendOrigin.CLIENT, AppendOrigin.CLIENT,
MetadataVersion.LATEST_PRODUCTION,
RequestLocal.noCaching(), RequestLocal.noCaching(),
VerificationGuard.SENTINEL); VerificationGuard.SENTINEL);
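
This is the pattern repeated across the tooling: appendAsLeader no longer threads a MetadataVersion through to record validation, since no supported version implies a message format below v2. The presumed remaining signature, reconstructed from the call above (parameter names are assumptions):

LogAppendInfo appendAsLeader(
    MemoryRecords records,
    int leaderEpoch,
    AppendOrigin origin,
    RequestLocal requestLocal,
    VerificationGuard verificationGuard
);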

View File

@ -32,7 +32,6 @@ import org.apache.kafka.common.utils.Exit;
import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils; import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.coordinator.transaction.TransactionLogConfig; import org.apache.kafka.coordinator.transaction.TransactionLogConfig;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.server.common.RequestLocal; import org.apache.kafka.server.common.RequestLocal;
import org.apache.kafka.server.util.CommandLineUtils; import org.apache.kafka.server.util.CommandLineUtils;
import org.apache.kafka.server.util.KafkaScheduler; import org.apache.kafka.server.util.KafkaScheduler;
@ -327,7 +326,6 @@ public class TestLinearWriteSpeed {
messages, messages,
0, 0,
AppendOrigin.CLIENT, AppendOrigin.CLIENT,
MetadataVersion.latestProduction(),
RequestLocal.noCaching(), RequestLocal.noCaching(),
VerificationGuard.SENTINEL VerificationGuard.SENTINEL
); );

View File

@ -124,8 +124,7 @@ public class PartitionMakeFollowerBenchmark {
Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Optional.of(0L)); Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Optional.of(0L));
AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class);
AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class); AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class);
partition = new Partition(tp, 100, partition = new Partition(tp, 100, 0, () -> -1, Time.SYSTEM,
MetadataVersion.latestTesting(), 0, () -> -1, Time.SYSTEM,
alterPartitionListener, delayedOperations, alterPartitionListener, delayedOperations,
Mockito.mock(MetadataCache.class), logManager, alterPartitionManager, topicId); Mockito.mock(MetadataCache.class), logManager, alterPartitionManager, topicId);
partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId, Option.empty()); partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId, Option.empty());

View File

@ -128,7 +128,7 @@ public class UpdateFollowerFetchStateBenchmark {
AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class);
AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class); AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class);
partition = new Partition(topicPartition, 100, partition = new Partition(topicPartition, 100,
MetadataVersion.latestTesting(), 0, () -> -1, Time.SYSTEM, 0, () -> -1, Time.SYSTEM,
alterPartitionListener, delayedOperations, alterPartitionListener, delayedOperations,
Mockito.mock(MetadataCache.class), logManager, alterPartitionManager, topicId); Mockito.mock(MetadataCache.class), logManager, alterPartitionManager, topicId);
partition.makeLeader(partitionState, offsetCheckpoints, topicId, Option.empty()); partition.makeLeader(partitionState, offsetCheckpoints, topicId, Option.empty());

View File

@ -23,7 +23,6 @@ import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.PrimitiveRef; import org.apache.kafka.common.utils.PrimitiveRef;
import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Time;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.storage.internals.log.AppendOrigin; import org.apache.kafka.storage.internals.log.AppendOrigin;
import org.apache.kafka.storage.internals.log.LogValidator; import org.apache.kafka.storage.internals.log.LogValidator;
@ -55,8 +54,7 @@ public class CompressedRecordBatchValidationBenchmark extends BaseRecordBatchBen
MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate()); MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate());
new LogValidator(records, new TopicPartition("a", 0), new LogValidator(records, new TopicPartition("a", 0),
Time.SYSTEM, compressionType, compression(), false, messageVersion, Time.SYSTEM, compressionType, compression(), false, messageVersion,
TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT, TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsetsCompressed(PrimitiveRef.ofLong(startingOffset), ).validateMessagesAndAssignOffsetsCompressed(PrimitiveRef.ofLong(startingOffset),
validatorMetricsRecorder, requestLocal.bufferSupplier()); validatorMetricsRecorder, requestLocal.bufferSupplier());
} }

View File

@ -23,7 +23,6 @@ import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.PrimitiveRef; import org.apache.kafka.common.utils.PrimitiveRef;
import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Time;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.storage.internals.log.AppendOrigin; import org.apache.kafka.storage.internals.log.AppendOrigin;
import org.apache.kafka.storage.internals.log.LogValidator; import org.apache.kafka.storage.internals.log.LogValidator;
@ -51,8 +50,7 @@ public class UncompressedRecordBatchValidationBenchmark extends BaseRecordBatchB
MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate()); MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate());
new LogValidator(records, new TopicPartition("a", 0), new LogValidator(records, new TopicPartition("a", 0),
Time.SYSTEM, CompressionType.NONE, Compression.NONE, false, Time.SYSTEM, CompressionType.NONE, Compression.NONE, false,
messageVersion, TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT, messageVersion, TimestampType.CREATE_TIME, Long.MAX_VALUE, Long.MAX_VALUE, 0, AppendOrigin.CLIENT
MetadataVersion.latestTesting()
).assignOffsetsNonCompressed(PrimitiveRef.ofLong(startingOffset), validatorMetricsRecorder); ).assignOffsetsNonCompressed(PrimitiveRef.ofLong(startingOffset), validatorMetricsRecorder);
} }
} }
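
Both record-batch benchmarks make the same adjustment: the trailing MetadataVersion argument to the LogValidator constructor is gone. Judging from the surviving call sites, the constructor presumably keeps the twelve remaining parameters (the names below are guesses for illustration):

new LogValidator(records, topicPartition, time,
    sourceCompressionType, targetCompression, compactedTopic,
    toMagic, timestampType, timestampBeforeMaxMs, timestampAfterMaxMs,
    partitionLeaderEpoch, origin);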

View File

@ -348,9 +348,9 @@ public class FeatureControlManagerTest {
public void testCanUseSafeDowngradeIfMetadataDidNotChange() { public void testCanUseSafeDowngradeIfMetadataDidNotChange() {
FeatureControlManager manager = new FeatureControlManager.Builder(). FeatureControlManager manager = new FeatureControlManager.Builder().
setQuorumFeatures(features(MetadataVersion.FEATURE_NAME, setQuorumFeatures(features(MetadataVersion.FEATURE_NAME,
MetadataVersion.IBP_3_0_IV0.featureLevel(), MetadataVersion.IBP_3_3_IV1.featureLevel())). MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.IBP_3_3_IV1.featureLevel())).
setMetadataVersion(MetadataVersion.IBP_3_1_IV0). setMetadataVersion(MetadataVersion.IBP_3_1_IV0).
setMinimumBootstrapVersion(MetadataVersion.IBP_3_0_IV0). setMinimumBootstrapVersion(MetadataVersion.IBP_3_0_IV1).
build(); build();
assertEquals(ControllerResult.of(Collections.emptyList(), ApiError.NONE), assertEquals(ControllerResult.of(Collections.emptyList(), ApiError.NONE),
manager.updateFeatures( manager.updateFeatures(
@ -363,7 +363,7 @@ public class FeatureControlManagerTest {
public void testCannotDowngradeBefore3_3_IV0() { public void testCannotDowngradeBefore3_3_IV0() {
FeatureControlManager manager = new FeatureControlManager.Builder(). FeatureControlManager manager = new FeatureControlManager.Builder().
setQuorumFeatures(features(MetadataVersion.FEATURE_NAME, setQuorumFeatures(features(MetadataVersion.FEATURE_NAME,
MetadataVersion.IBP_3_0_IV0.featureLevel(), MetadataVersion.IBP_3_3_IV3.featureLevel())). MetadataVersion.IBP_3_0_IV1.featureLevel(), MetadataVersion.IBP_3_3_IV3.featureLevel())).
setMetadataVersion(MetadataVersion.IBP_3_3_IV0). setMetadataVersion(MetadataVersion.IBP_3_3_IV0).
build(); build();
assertEquals(ControllerResult.of(Collections.emptyList(), new ApiError(Errors.INVALID_UPDATE_VERSION, assertEquals(ControllerResult.of(Collections.emptyList(), new ApiError(Errors.INVALID_UPDATE_VERSION,

View File

@ -692,7 +692,7 @@ public class QuorumControllerTest {
alterPartitionRequest.topics().add(topicData); alterPartitionRequest.topics().add(topicData);
active.alterPartition(ANONYMOUS_CONTEXT, new AlterPartitionRequest active.alterPartition(ANONYMOUS_CONTEXT, new AlterPartitionRequest
.Builder(alterPartitionRequest, false).build((short) 0).data()).get(); .Builder(alterPartitionRequest).build((short) 0).data()).get();
AtomicLong lastHeartbeatMs = new AtomicLong(getMonotonicMs(active.time())); AtomicLong lastHeartbeatMs = new AtomicLong(getMonotonicMs(active.time()));
sendBrokerHeartbeatToUnfenceBrokers(active, allBrokers, brokerEpochs); sendBrokerHeartbeatToUnfenceBrokers(active, allBrokers, brokerEpochs);
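
AlterPartitionRequest.Builder drops its boolean second argument here and throughout ReplicationControlManagerTest below. That flag previously gated the use of newer request versions (the ones keyed by topic IDs); with 3.0-IV1 as the baseline, topic IDs always exist, so the builder can unconditionally offer the full version range. A sketch under that assumption, not the verbatim class:

public static class Builder extends AbstractRequest.Builder<AlterPartitionRequest> {
    private final AlterPartitionRequestData data;

    public Builder(AlterPartitionRequestData data) {
        super(ApiKeys.ALTER_PARTITION); // all supported versions allowed
        this.data = data;
    }
}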

View File

@ -1885,7 +1885,7 @@ public class ReplicationControlManagerTest {
setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1)))))); setNewIsrWithEpochs(isrWithDefaultEpoch(3, 0, 2, 1))))));
ControllerResult<AlterPartitionResponseData> alterPartitionResult = replication.alterPartition( ControllerResult<AlterPartitionResponseData> alterPartitionResult = replication.alterPartition(
requestContext, requestContext,
new AlterPartitionRequest.Builder(alterPartitionRequestData, version > 1).build(version).data()); new AlterPartitionRequest.Builder(alterPartitionRequestData).build(version).data());
Errors expectedError = version > 1 ? NEW_LEADER_ELECTED : FENCED_LEADER_EPOCH; Errors expectedError = version > 1 ? NEW_LEADER_ELECTED : FENCED_LEADER_EPOCH;
assertEquals(new AlterPartitionResponseData().setTopics(singletonList( assertEquals(new AlterPartitionResponseData().setTopics(singletonList(
new AlterPartitionResponseData.TopicData(). new AlterPartitionResponseData.TopicData().
@ -1949,7 +1949,7 @@ public class ReplicationControlManagerTest {
anonymousContextFor(ApiKeys.ALTER_PARTITION, version); anonymousContextFor(ApiKeys.ALTER_PARTITION, version);
ControllerResult<AlterPartitionResponseData> alterPartitionResult = ControllerResult<AlterPartitionResponseData> alterPartitionResult =
replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest, version > 1).build(version).data()); replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest).build(version).data());
Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA;
assertEquals( assertEquals(
@ -2034,7 +2034,7 @@ public class ReplicationControlManagerTest {
anonymousContextFor(ApiKeys.ALTER_PARTITION, version); anonymousContextFor(ApiKeys.ALTER_PARTITION, version);
ControllerResult<AlterPartitionResponseData> alterPartitionResult = ControllerResult<AlterPartitionResponseData> alterPartitionResult =
replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest, version > 1).build(version).data()); replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest).build(version).data());
// The late arrived AlterPartition request should be rejected when version >= 3. // The late arrived AlterPartition request should be rejected when version >= 3.
if (version >= 3) { if (version >= 3) {
@ -2099,7 +2099,7 @@ public class ReplicationControlManagerTest {
anonymousContextFor(ApiKeys.ALTER_PARTITION, version); anonymousContextFor(ApiKeys.ALTER_PARTITION, version);
ControllerResult<AlterPartitionResponseData> alterPartitionResult = ControllerResult<AlterPartitionResponseData> alterPartitionResult =
replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest, version > 1).build(version).data()); replication.alterPartition(requestContext, new AlterPartitionRequest.Builder(alterIsrRequest).build(version).data());
Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA; Errors expectedError = version <= 1 ? OPERATION_NOT_ATTEMPTED : INELIGIBLE_REPLICA;
assertEquals( assertEquals(
@ -2954,7 +2954,7 @@ public class ReplicationControlManagerTest {
setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2)))))); setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2))))));
ControllerResult<AlterPartitionResponseData> alterPartitionResult = replication.alterPartition( ControllerResult<AlterPartitionResponseData> alterPartitionResult = replication.alterPartition(
anonymousContextFor(ApiKeys.ALTER_PARTITION), anonymousContextFor(ApiKeys.ALTER_PARTITION),
new AlterPartitionRequest.Builder(alterPartitionRequestData, true).build().data()); new AlterPartitionRequest.Builder(alterPartitionRequestData).build().data());
assertEquals(new AlterPartitionResponseData().setTopics(singletonList( assertEquals(new AlterPartitionResponseData().setTopics(singletonList(
new AlterPartitionResponseData.TopicData(). new AlterPartitionResponseData.TopicData().
setTopicId(topicId). setTopicId(topicId).
@ -3029,7 +3029,7 @@ public class ReplicationControlManagerTest {
setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5)))))); setNewIsrWithEpochs(isrWithDefaultEpoch(0, 1, 2, 3, 4, 5))))));
ControllerResult<AlterPartitionResponseData> alterPartitionResultTwo = replication.alterPartition( ControllerResult<AlterPartitionResponseData> alterPartitionResultTwo = replication.alterPartition(
anonymousContextFor(ApiKeys.ALTER_PARTITION), anonymousContextFor(ApiKeys.ALTER_PARTITION),
new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo, true).build().data()); new AlterPartitionRequest.Builder(alterPartitionRequestDataTwo).build().data());
assertEquals(new AlterPartitionResponseData().setTopics(singletonList( assertEquals(new AlterPartitionResponseData().setTopics(singletonList(
new AlterPartitionResponseData.TopicData(). new AlterPartitionResponseData.TopicData().
setTopicId(topicId). setTopicId(topicId).

View File

@ -85,7 +85,7 @@ public class BootstrapDirectoryTest {
try (BootstrapTestDirectory testDirectory = new BootstrapTestDirectory().createDirectory()) { try (BootstrapTestDirectory testDirectory = new BootstrapTestDirectory().createDirectory()) {
assertEquals(BootstrapMetadata.fromVersion(MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, assertEquals(BootstrapMetadata.fromVersion(MetadataVersion.MINIMUM_BOOTSTRAP_VERSION,
"the minimum version bootstrap with metadata.version 3.3-IV0"), "the minimum version bootstrap with metadata.version 3.3-IV0"),
new BootstrapDirectory(testDirectory.path(), Optional.of("2.7")).read()); new BootstrapDirectory(testDirectory.path(), Optional.of("3.0")).read());
} }
} }
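
The literal changes from "2.7" to "3.0" because MetadataVersion.fromVersionString can no longer resolve pre-3.0 names at all, while "3.0" still parses (to 3.0-IV1, per the parsing test further down) and is then raised to the 3.3-IV0 bootstrap floor, which is exactly what the assertion checks:

// Behavior visible in this diff, shown inline for clarity:
MetadataVersion parsed = MetadataVersion.fromVersionString("3.0"); // IBP_3_0_IV1
// BootstrapDirectory.read() then yields BootstrapMetadata at
// MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, i.e. 3.3-IV0.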

View File

@ -16,7 +16,6 @@
*/ */
package org.apache.kafka.server.common; package org.apache.kafka.server.common;
import org.apache.kafka.common.record.RecordVersion;
import java.util.Arrays; import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
@ -45,103 +44,6 @@ import java.util.regex.Pattern;
*/ */
public enum MetadataVersion { public enum MetadataVersion {
IBP_0_8_0(-1, "0.8.0", ""),
IBP_0_8_1(-1, "0.8.1", ""),
IBP_0_8_2(-1, "0.8.2", ""),
IBP_0_9_0(-1, "0.9.0", ""),
// 0.10.0-IV0 is introduced for KIP-31/32 which changes the message format.
IBP_0_10_0_IV0(-1, "0.10.0", "IV0"),
// 0.10.0-IV1 is introduced for KIP-36 (rack awareness) and KIP-43 (SASL handshake).
IBP_0_10_0_IV1(-1, "0.10.0", "IV1"),
// introduced for JoinGroup protocol change in KIP-62
IBP_0_10_1_IV0(-1, "0.10.1", "IV0"),
// 0.10.1-IV1 is introduced for KIP-74 (fetch response size limit).
IBP_0_10_1_IV1(-1, "0.10.1", "IV1"),
// introduced ListOffsetRequest v1 in KIP-79
IBP_0_10_1_IV2(-1, "0.10.1", "IV2"),
// introduced UpdateMetadataRequest v3 in KIP-103
IBP_0_10_2_IV0(-1, "0.10.2", "IV0"),
// KIP-98 (idempotent and transactional producer support)
IBP_0_11_0_IV0(-1, "0.11.0", "IV0"),
// introduced DeleteRecordsRequest v0 and FetchRequest v4 in KIP-107
IBP_0_11_0_IV1(-1, "0.11.0", "IV1"),
// Introduced leader epoch fetches to the replica fetcher via KIP-101
IBP_0_11_0_IV2(-1, "0.11.0", "IV2"),
// Introduced LeaderAndIsrRequest V1, UpdateMetadataRequest V4 and FetchRequest V6 via KIP-112
IBP_1_0_IV0(-1, "1.0", "IV0"),
// Introduced DeleteGroupsRequest V0 via KIP-229, plus KIP-227 incremental fetch requests,
// and KafkaStorageException for fetch requests.
IBP_1_1_IV0(-1, "1.1", "IV0"),
// Introduced OffsetsForLeaderEpochRequest V1 via KIP-279 (Fix log divergence between leader and follower after fast leader fail over)
IBP_2_0_IV0(-1, "2.0", "IV0"),
// Several request versions were bumped due to KIP-219 (Improve quota communication)
IBP_2_0_IV1(-1, "2.0", "IV1"),
// Introduced new schemas for group offset (v2) and group metadata (v2) (KIP-211)
IBP_2_1_IV0(-1, "2.1", "IV0"),
// New Fetch, OffsetsForLeaderEpoch, and ListOffsets schemas (KIP-320)
IBP_2_1_IV1(-1, "2.1", "IV1"),
// Support ZStandard Compression Codec (KIP-110)
IBP_2_1_IV2(-1, "2.1", "IV2"),
// Introduced broker generation (KIP-380), and
// LeaderAndIsrRequest V2, UpdateMetadataRequest V5, StopReplicaRequest V1
IBP_2_2_IV0(-1, "2.2", "IV0"),
// New error code for ListOffsets when a new leader is lagging behind former HW (KIP-207)
IBP_2_2_IV1(-1, "2.2", "IV1"),
// Introduced static membership.
IBP_2_3_IV0(-1, "2.3", "IV0"),
// Add rack_id to FetchRequest, preferred_read_replica to FetchResponse, and replica_id to OffsetsForLeaderRequest
IBP_2_3_IV1(-1, "2.3", "IV1"),
// Add adding_replicas and removing_replicas fields to LeaderAndIsrRequest
IBP_2_4_IV0(-1, "2.4", "IV0"),
// Flexible version support in inter-broker APIs
IBP_2_4_IV1(-1, "2.4", "IV1"),
// No new APIs, equivalent to 2.4-IV1
IBP_2_5_IV0(-1, "2.5", "IV0"),
// Introduced StopReplicaRequest V3 containing the leader epoch for each partition (KIP-570)
IBP_2_6_IV0(-1, "2.6", "IV0"),
// Introduced feature versioning support (KIP-584)
IBP_2_7_IV0(-1, "2.7", "IV0"),
// Bump Fetch protocol for Raft protocol (KIP-595)
IBP_2_7_IV1(-1, "2.7", "IV1"),
// Introduced AlterPartition (KIP-497)
IBP_2_7_IV2(-1, "2.7", "IV2"),
// Flexible versioning on ListOffsets, WriteTxnMarkers and OffsetsForLeaderEpoch. Also adds topic IDs (KIP-516)
IBP_2_8_IV0(-1, "2.8", "IV0"),
// Introduced topic IDs to LeaderAndIsr and UpdateMetadata requests/responses (KIP-516)
IBP_2_8_IV1(-1, "2.8", "IV1"),
// Introduce AllocateProducerIds (KIP-730)
IBP_3_0_IV0(-1, "3.0", "IV0"),
// Introduce ListOffsets V7 which supports listing offsets by max timestamp (KIP-734) // Introduce ListOffsets V7 which supports listing offsets by max timestamp (KIP-734)
// Assume message format version is 3.0 (KIP-724) // Assume message format version is 3.0 (KIP-724)
IBP_3_0_IV1(1, "3.0", "IV1", true), IBP_3_0_IV1(1, "3.0", "IV1", true),
@ -290,34 +192,6 @@ public enum MetadataVersion {
return featureLevel; return featureLevel;
} }
public boolean isSaslInterBrokerHandshakeRequestEnabled() {
return this.isAtLeast(IBP_0_10_0_IV1);
}
public boolean isOffsetForLeaderEpochSupported() {
return this.isAtLeast(IBP_0_11_0_IV2);
}
public boolean isFeatureVersioningSupported() {
return this.isAtLeast(IBP_2_7_IV0);
}
public boolean isTruncationOnFetchSupported() {
return this.isAtLeast(IBP_2_7_IV1);
}
public boolean isAlterPartitionSupported() {
return this.isAtLeast(IBP_2_7_IV2);
}
public boolean isTopicIdsSupported() {
return this.isAtLeast(IBP_2_8_IV0);
}
public boolean isAllocateProducerIdsSupported() {
return this.isAtLeast(IBP_3_0_IV0);
}
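
Every predicate removed above compared this against a pre-3.0 constant, so at the new 3.0-IV1 floor each one returned true unconditionally; the methods and their call-site guards can simply go. A hypothetical caller, for illustration only:

// before: guarded on a capability every surviving version has
//     if (metadataVersion.isTopicIdsSupported()) {
//         request.setTopicId(topicId);
//     }
// after: the capability is unconditional, so the guard disappears
request.setTopicId(topicId);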
public boolean isLeaderRecoverySupported() { public boolean isLeaderRecoverySupported() {
return this.isAtLeast(IBP_3_2_IV0); return this.isAtLeast(IBP_3_2_IV0);
} }
@ -358,16 +232,6 @@ public enum MetadataVersion {
return this.featureLevel > 0; return this.featureLevel > 0;
} }
public RecordVersion highestSupportedRecordVersion() {
if (this.isLessThan(IBP_0_10_0_IV0)) {
return RecordVersion.V0;
} else if (this.isLessThan(IBP_0_11_0_IV0)) {
return RecordVersion.V1;
} else {
return RecordVersion.V2;
}
}
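
highestSupportedRecordVersion goes for the same reason: every surviving version sits above IBP_0_11_0_IV0, so the method could only ever return RecordVersion.V2 (KIP-724 already assumes message format 3.0). Former callers can presumably hard-code the answer:

RecordVersion recordVersion = RecordVersion.V2; // the only possible result now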
public boolean isBrokerRegistrationChangeRecordSupported() { public boolean isBrokerRegistrationChangeRecordSupported() {
return this.isAtLeast(IBP_3_3_IV2); return this.isAtLeast(IBP_3_3_IV2);
} }
@ -438,43 +302,8 @@ public enum MetadataVersion {
return 14; return 14;
} else if (this.isAtLeast(IBP_3_1_IV0)) { } else if (this.isAtLeast(IBP_3_1_IV0)) {
return 13; return 13;
} else if (this.isAtLeast(IBP_2_7_IV1)) { }
return 12; return 12;
} else if (this.isAtLeast(IBP_2_3_IV1)) {
return 11;
} else if (this.isAtLeast(IBP_2_1_IV2)) {
return 10;
} else if (this.isAtLeast(IBP_2_0_IV1)) {
return 8;
} else if (this.isAtLeast(IBP_1_1_IV0)) {
return 7;
} else if (this.isAtLeast(IBP_0_11_0_IV1)) {
return 5;
} else if (this.isAtLeast(IBP_0_11_0_IV0)) {
return 4;
} else if (this.isAtLeast(IBP_0_10_1_IV1)) {
return 3;
} else if (this.isAtLeast(IBP_0_10_0_IV0)) {
return 2;
} else if (this.isAtLeast(IBP_0_9_0)) {
return 1;
} else {
return 0;
}
}
public short offsetForLeaderEpochRequestVersion() {
if (this.isAtLeast(IBP_2_8_IV0)) {
return 4;
} else if (this.isAtLeast(IBP_2_3_IV1)) {
return 3;
} else if (this.isAtLeast(IBP_2_1_IV1)) {
return 2;
} else if (this.isAtLeast(IBP_2_0_IV0)) {
return 1;
} else {
return 0;
}
} }
public short listOffsetRequestVersion() { public short listOffsetRequestVersion() {
@ -484,48 +313,8 @@ public enum MetadataVersion {
return 9; return 9;
} else if (this.isAtLeast(IBP_3_5_IV0)) { } else if (this.isAtLeast(IBP_3_5_IV0)) {
return 8; return 8;
} else if (this.isAtLeast(IBP_3_0_IV1)) { } else {
return 7; return 7;
} else if (this.isAtLeast(IBP_2_8_IV0)) {
return 6;
} else if (this.isAtLeast(IBP_2_2_IV1)) {
return 5;
} else if (this.isAtLeast(IBP_2_1_IV1)) {
return 4;
} else if (this.isAtLeast(IBP_2_0_IV1)) {
return 3;
} else if (this.isAtLeast(IBP_0_11_0_IV0)) {
return 2;
} else if (this.isAtLeast(IBP_0_10_1_IV2)) {
return 1;
} else {
return 0;
}
}
public short groupMetadataValueVersion() {
if (this.isLessThan(IBP_0_10_1_IV0)) {
return 0;
} else if (this.isLessThan(IBP_2_1_IV0)) {
return 1;
} else if (this.isLessThan(IBP_2_3_IV0)) {
return 2;
} else {
// Serialize with the highest supported non-flexible version
// until a tagged field is introduced or the version is bumped.
return 3;
}
}
public short offsetCommitValueVersion(boolean expireTimestampMs) {
if (isLessThan(MetadataVersion.IBP_2_1_IV0) || expireTimestampMs) {
return 1;
} else if (isLessThan(MetadataVersion.IBP_2_1_IV1)) {
return 2;
} else {
// Serialize with the highest supported non-flexible version
// until a tagged field is introduced or the version is bumped.
return 3;
} }
} }
@ -600,22 +389,6 @@ public enum MetadataVersion {
throw new IllegalArgumentException("No MetadataVersion with feature level " + version); throw new IllegalArgumentException("No MetadataVersion with feature level " + version);
} }
/**
* Return the minimum `MetadataVersion` that supports `RecordVersion`.
*/
public static MetadataVersion minSupportedFor(RecordVersion recordVersion) {
switch (recordVersion) {
case V0:
return IBP_0_8_0;
case V1:
return IBP_0_10_0_IV0;
case V2:
return IBP_0_11_0_IV0;
default:
throw new IllegalArgumentException("Invalid message format version " + recordVersion);
}
}
// Testing only // Testing only
public static MetadataVersion latestTesting() { public static MetadataVersion latestTesting() {
return VERSIONS[VERSIONS.length - 1]; return VERSIONS[VERSIONS.length - 1];
@ -654,14 +427,6 @@ public enum MetadataVersion {
return version != lowVersion; return version != lowVersion;
} }
public short writeTxnMarkersRequestVersion() {
if (isAtLeast(IBP_2_8_IV0)) {
return 1;
} else {
return 0;
}
}
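
Likewise, with IBP_2_8_IV0 always satisfied, writeTxnMarkersRequestVersion could only return 1, so callers presumably pin the version instead of consulting the MetadataVersion:

short writeTxnMarkersVersion = 1; // the branch on IBP_2_8_IV0 was vacuous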
public boolean isAtLeast(MetadataVersion otherVersion) { public boolean isAtLeast(MetadataVersion otherVersion) {
return this.compareTo(otherVersion) >= 0; return this.compareTo(otherVersion) >= 0;
} }

View File

@ -111,7 +111,7 @@ public class FeatureTest {
assertThrows(IllegalArgumentException.class, assertThrows(IllegalArgumentException.class,
() -> Feature.validateVersion( () -> Feature.validateVersion(
TestFeatureVersion.TEST_1, TestFeatureVersion.TEST_1,
Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_2_8_IV0.featureLevel()) Collections.singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_0_IV1.featureLevel())
) )
); );

View File

@ -18,7 +18,6 @@
package org.apache.kafka.server.common; package org.apache.kafka.server.common;
import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.record.RecordVersion;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
@ -27,8 +26,6 @@ import org.junit.jupiter.params.provider.EnumSource;
import static org.apache.kafka.server.common.MetadataVersion.*; import static org.apache.kafka.server.common.MetadataVersion.*;
import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertTrue;
class MetadataVersionTest { class MetadataVersionTest {
@ -50,102 +47,7 @@ class MetadataVersionTest {
@Test @Test
@SuppressWarnings("checkstyle:JavaNCSS") @SuppressWarnings("checkstyle:JavaNCSS")
public void testFromVersionString() { public void testFromVersionString() {
assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0"));
assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0.0"));
assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0.1"));
// should throw an exception as long as IBP_8_0_IV0 is not defined
assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("8.0"));
assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1"));
assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1.0"));
assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1.1"));
assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2"));
assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2.0"));
assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2.1"));
assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0"));
assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0.0"));
assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0.1"));
assertEquals(IBP_0_10_0_IV0, MetadataVersion.fromVersionString("0.10.0-IV0"));
assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0"));
assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.0"));
assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.0-IV0"));
assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.1"));
assertEquals(IBP_0_10_1_IV0, MetadataVersion.fromVersionString("0.10.1-IV0"));
assertEquals(IBP_0_10_1_IV1, MetadataVersion.fromVersionString("0.10.1-IV1"));
assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1"));
assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1.0"));
assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1-IV2"));
assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1.1"));
assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2"));
assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2.0"));
assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2-IV0"));
assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2.1"));
assertEquals(IBP_0_11_0_IV0, MetadataVersion.fromVersionString("0.11.0-IV0"));
assertEquals(IBP_0_11_0_IV1, MetadataVersion.fromVersionString("0.11.0-IV1"));
assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0"));
assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0.0"));
assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0-IV2"));
assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0.1"));
assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0"));
assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.0"));
assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.0-IV0"));
assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.1"));
assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0"));
assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0.0"));
assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0-IV0"));
assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0.0-IV0"));
assertEquals(IBP_1_1_IV0, MetadataVersion.fromVersionString("1.1-IV0"));
assertEquals(IBP_2_0_IV1, MetadataVersion.fromVersionString("2.0"));
assertEquals(IBP_2_0_IV0, MetadataVersion.fromVersionString("2.0-IV0"));
assertEquals(IBP_2_0_IV1, MetadataVersion.fromVersionString("2.0-IV1"));
assertEquals(IBP_2_1_IV2, MetadataVersion.fromVersionString("2.1"));
assertEquals(IBP_2_1_IV0, MetadataVersion.fromVersionString("2.1-IV0"));
assertEquals(IBP_2_1_IV1, MetadataVersion.fromVersionString("2.1-IV1"));
assertEquals(IBP_2_1_IV2, MetadataVersion.fromVersionString("2.1-IV2"));
assertEquals(IBP_2_2_IV1, MetadataVersion.fromVersionString("2.2"));
assertEquals(IBP_2_2_IV0, MetadataVersion.fromVersionString("2.2-IV0"));
assertEquals(IBP_2_2_IV1, MetadataVersion.fromVersionString("2.2-IV1"));
assertEquals(IBP_2_3_IV1, MetadataVersion.fromVersionString("2.3"));
assertEquals(IBP_2_3_IV0, MetadataVersion.fromVersionString("2.3-IV0"));
assertEquals(IBP_2_3_IV1, MetadataVersion.fromVersionString("2.3-IV1"));
assertEquals(IBP_2_4_IV1, MetadataVersion.fromVersionString("2.4"));
assertEquals(IBP_2_4_IV0, MetadataVersion.fromVersionString("2.4-IV0"));
assertEquals(IBP_2_4_IV1, MetadataVersion.fromVersionString("2.4-IV1"));
assertEquals(IBP_2_5_IV0, MetadataVersion.fromVersionString("2.5"));
assertEquals(IBP_2_5_IV0, MetadataVersion.fromVersionString("2.5-IV0"));
assertEquals(IBP_2_6_IV0, MetadataVersion.fromVersionString("2.6"));
assertEquals(IBP_2_6_IV0, MetadataVersion.fromVersionString("2.6-IV0"));
// 2.7-IV2 is the latest production version in the 2.7 line
assertEquals(IBP_2_7_IV2, MetadataVersion.fromVersionString("2.7"));
assertEquals(IBP_2_7_IV0, MetadataVersion.fromVersionString("2.7-IV0"));
assertEquals(IBP_2_7_IV1, MetadataVersion.fromVersionString("2.7-IV1"));
assertEquals(IBP_2_7_IV2, MetadataVersion.fromVersionString("2.7-IV2"));
assertEquals(IBP_2_8_IV1, MetadataVersion.fromVersionString("2.8"));
assertEquals(IBP_2_8_IV0, MetadataVersion.fromVersionString("2.8-IV0"));
assertEquals(IBP_2_8_IV1, MetadataVersion.fromVersionString("2.8-IV1"));
assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0"));
assertEquals(IBP_3_0_IV0, MetadataVersion.fromVersionString("3.0-IV0"));
assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0-IV1"));
assertEquals(IBP_3_1_IV0, MetadataVersion.fromVersionString("3.1"));
@@ -199,44 +101,8 @@ class MetadataVersionTest {
assertEquals(IBP_4_0_IV3, MetadataVersion.fromVersionString("4.0-IV3"));
}
@Test
public void testMinSupportedVersionFor() {
assertEquals(IBP_0_8_0, MetadataVersion.minSupportedFor(RecordVersion.V0));
assertEquals(IBP_0_10_0_IV0, MetadataVersion.minSupportedFor(RecordVersion.V1));
assertEquals(IBP_0_11_0_IV0, MetadataVersion.minSupportedFor(RecordVersion.V2));
// Ensure that all record versions have a defined min version so that we remember to update the method
for (RecordVersion recordVersion : RecordVersion.values()) {
assertNotNull(MetadataVersion.minSupportedFor(recordVersion));
}
}
@Test
public void testShortVersion() {
assertEquals("0.8.0", IBP_0_8_0.shortVersion());
assertEquals("0.10.0", IBP_0_10_0_IV0.shortVersion());
assertEquals("0.10.0", IBP_0_10_0_IV1.shortVersion());
assertEquals("0.11.0", IBP_0_11_0_IV0.shortVersion());
assertEquals("0.11.0", IBP_0_11_0_IV1.shortVersion());
assertEquals("0.11.0", IBP_0_11_0_IV2.shortVersion());
assertEquals("1.0", IBP_1_0_IV0.shortVersion());
assertEquals("1.1", IBP_1_1_IV0.shortVersion());
assertEquals("2.0", IBP_2_0_IV0.shortVersion());
assertEquals("2.0", IBP_2_0_IV1.shortVersion());
assertEquals("2.1", IBP_2_1_IV0.shortVersion());
assertEquals("2.1", IBP_2_1_IV1.shortVersion());
assertEquals("2.1", IBP_2_1_IV2.shortVersion());
assertEquals("2.2", IBP_2_2_IV0.shortVersion());
assertEquals("2.2", IBP_2_2_IV1.shortVersion());
assertEquals("2.3", IBP_2_3_IV0.shortVersion());
assertEquals("2.3", IBP_2_3_IV1.shortVersion());
assertEquals("2.4", IBP_2_4_IV0.shortVersion());
assertEquals("2.5", IBP_2_5_IV0.shortVersion());
assertEquals("2.6", IBP_2_6_IV0.shortVersion());
assertEquals("2.7", IBP_2_7_IV2.shortVersion());
assertEquals("2.8", IBP_2_8_IV0.shortVersion());
assertEquals("2.8", IBP_2_8_IV1.shortVersion());
assertEquals("3.0", IBP_3_0_IV0.shortVersion());
assertEquals("3.0", IBP_3_0_IV1.shortVersion()); assertEquals("3.0", IBP_3_0_IV1.shortVersion());
assertEquals("3.1", IBP_3_1_IV0.shortVersion()); assertEquals("3.1", IBP_3_1_IV0.shortVersion());
assertEquals("3.2", IBP_3_2_IV0.shortVersion()); assertEquals("3.2", IBP_3_2_IV0.shortVersion());
@ -266,31 +132,6 @@ class MetadataVersionTest {
@Test @Test
public void testVersion() { public void testVersion() {
assertEquals("0.8.0", IBP_0_8_0.version());
assertEquals("0.8.2", IBP_0_8_2.version());
assertEquals("0.10.0-IV0", IBP_0_10_0_IV0.version());
assertEquals("0.10.0-IV1", IBP_0_10_0_IV1.version());
assertEquals("0.11.0-IV0", IBP_0_11_0_IV0.version());
assertEquals("0.11.0-IV1", IBP_0_11_0_IV1.version());
assertEquals("0.11.0-IV2", IBP_0_11_0_IV2.version());
assertEquals("1.0-IV0", IBP_1_0_IV0.version());
assertEquals("1.1-IV0", IBP_1_1_IV0.version());
assertEquals("2.0-IV0", IBP_2_0_IV0.version());
assertEquals("2.0-IV1", IBP_2_0_IV1.version());
assertEquals("2.1-IV0", IBP_2_1_IV0.version());
assertEquals("2.1-IV1", IBP_2_1_IV1.version());
assertEquals("2.1-IV2", IBP_2_1_IV2.version());
assertEquals("2.2-IV0", IBP_2_2_IV0.version());
assertEquals("2.2-IV1", IBP_2_2_IV1.version());
assertEquals("2.3-IV0", IBP_2_3_IV0.version());
assertEquals("2.3-IV1", IBP_2_3_IV1.version());
assertEquals("2.4-IV0", IBP_2_4_IV0.version());
assertEquals("2.5-IV0", IBP_2_5_IV0.version());
assertEquals("2.6-IV0", IBP_2_6_IV0.version());
assertEquals("2.7-IV2", IBP_2_7_IV2.version());
assertEquals("2.8-IV0", IBP_2_8_IV0.version());
assertEquals("2.8-IV1", IBP_2_8_IV1.version());
assertEquals("3.0-IV0", IBP_3_0_IV0.version());
assertEquals("3.0-IV1", IBP_3_0_IV1.version()); assertEquals("3.0-IV1", IBP_3_0_IV1.version());
assertEquals("3.1-IV0", IBP_3_1_IV0.version()); assertEquals("3.1-IV0", IBP_3_1_IV0.version());
assertEquals("3.2-IV0", IBP_3_2_IV0.version()); assertEquals("3.2-IV0", IBP_3_2_IV0.version());
@ -332,13 +173,12 @@ class MetadataVersionTest {
assertFalse(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_2_IV0)); assertFalse(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_2_IV0));
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_1_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_1_IV0));
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV1)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV1));
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_3_0_IV1));
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_2_IV0, IBP_2_8_IV1));
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_3_IV1, IBP_3_3_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_3_IV1, IBP_3_3_IV0));
// Check that argument order doesn't matter // Check that argument order doesn't matter
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_0_IV0, IBP_3_2_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_1_IV0, IBP_3_2_IV0));
assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_2_8_IV1, IBP_3_2_IV0)); assertTrue(MetadataVersion.checkIfMetadataChanged(IBP_3_0_IV1, IBP_3_2_IV0));
} }
@Test @Test
@ -430,42 +270,6 @@ class MetadataVersionTest {
assertEquals(expectedVersion, metadataVersion.registerBrokerRecordVersion()); assertEquals(expectedVersion, metadataVersion.registerBrokerRecordVersion());
} }
@ParameterizedTest
@EnumSource(value = MetadataVersion.class)
public void testGroupMetadataValueVersion(MetadataVersion metadataVersion) {
final short expectedVersion;
if (metadataVersion.isAtLeast(MetadataVersion.IBP_2_3_IV0)) {
expectedVersion = 3;
} else if (metadataVersion.isAtLeast(IBP_2_1_IV0)) {
expectedVersion = 2;
} else if (metadataVersion.isAtLeast(IBP_0_10_1_IV0)) {
expectedVersion = 1;
} else {
expectedVersion = 0;
}
assertEquals(expectedVersion, metadataVersion.groupMetadataValueVersion());
}
@ParameterizedTest
@EnumSource(value = MetadataVersion.class)
public void testOffsetCommitValueVersion(MetadataVersion metadataVersion) {
final short expectedVersion;
if (metadataVersion.isAtLeast(MetadataVersion.IBP_2_1_IV1)) {
expectedVersion = 3;
} else if (metadataVersion.isAtLeast(IBP_2_1_IV0)) {
expectedVersion = 2;
} else {
expectedVersion = 1;
}
assertEquals(expectedVersion, metadataVersion.offsetCommitValueVersion(false));
}
@ParameterizedTest
@EnumSource(value = MetadataVersion.class)
public void testOffsetCommitValueVersionWithExpiredTimestamp(MetadataVersion metadataVersion) {
assertEquals((short) 1, metadataVersion.offsetCommitValueVersion(true));
}
@Test
public void assertLatestProductionIsLessThanLatest() {
assertTrue(LATEST_PRODUCTION.ordinal() < MetadataVersion.latestTesting().ordinal(),
@@ -483,8 +287,6 @@ class MetadataVersionTest {
MetadataVersion mv = MetadataVersion.latestProduction();
assertTrue(mv.listOffsetRequestVersion() <= ApiKeys.LIST_OFFSETS.latestVersion(false));
assertTrue(mv.fetchRequestVersion() <= ApiKeys.FETCH.latestVersion(false));
assertTrue(mv.offsetForLeaderEpochRequestVersion() <= ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(false));
assertTrue(mv.writeTxnMarkersRequestVersion() <= ApiKeys.WRITE_TXN_MARKERS.latestVersion(false));
}
@Test

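With every MetadataVersion below 3.0-IV1 gone, the assertions deleted above have no enum constants left to reference. A minimal sketch of the parsing behavior the trimmed test still pins down (illustrative Java, not part of this diff; the "2.8" case is an assumption based on fromVersionString throwing for unrecognized versions):

// "3.0" still resolves to the newest 3.0 IV, which is now also the minimum.
assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0"));
// Pre-3.0 strings presumably fail now that their constants no longer exist.
assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("2.8"));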
View File

@@ -21,7 +21,6 @@ import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.compress.Compression;
import org.apache.kafka.common.errors.CorruptRecordException;
import org.apache.kafka.common.errors.InvalidTimestampException;
import org.apache.kafka.common.errors.UnsupportedCompressionTypeException;
import org.apache.kafka.common.errors.UnsupportedForMessageFormatException;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.AbstractRecords;
@@ -40,7 +39,6 @@ import org.apache.kafka.common.utils.CloseableIterator;
import org.apache.kafka.common.utils.PrimitiveRef;
import org.apache.kafka.common.utils.PrimitiveRef.LongRef;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.server.common.MetadataVersion;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -49,8 +47,6 @@ import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import static org.apache.kafka.server.common.MetadataVersion.IBP_2_1_IV0;
public class LogValidator {
public interface MetricsRecorder {
@@ -110,7 +106,6 @@ public class LogValidator {
private final long timestampAfterMaxMs;
private final int partitionLeaderEpoch;
private final AppendOrigin origin;
private final MetadataVersion interBrokerProtocolVersion;
public LogValidator(MemoryRecords records,
TopicPartition topicPartition,
@@ -123,8 +118,7 @@ public class LogValidator {
long timestampBeforeMaxMs,
long timestampAfterMaxMs,
int partitionLeaderEpoch,
AppendOrigin origin,
MetadataVersion interBrokerProtocolVersion) {
AppendOrigin origin) {
this.records = records;
this.topicPartition = topicPartition;
this.time = time;
@@ -137,7 +131,6 @@ public class LogValidator {
this.timestampAfterMaxMs = timestampAfterMaxMs;
this.partitionLeaderEpoch = partitionLeaderEpoch;
this.origin = origin;
this.interBrokerProtocolVersion = interBrokerProtocolVersion;
}
/**
@@ -332,10 +325,6 @@ public class LogValidator {
public ValidationResult validateMessagesAndAssignOffsetsCompressed(LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (targetCompression.type() == CompressionType.ZSTD && interBrokerProtocolVersion.isLessThan(IBP_2_1_IV0))
throw new UnsupportedCompressionTypeException("Produce requests to inter.broker.protocol.version < 2.1 broker " +
"are not allowed to use ZStandard compression");
// No in place assignment situation 1
boolean inPlaceAssignment = sourceCompressionType == targetCompression.type();
long now = time.milliseconds();

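With interBrokerProtocolVersion dropped from the constructor, the ZSTD guard above goes with it: the removed check only ever fired below 2.1, which no remaining MetadataVersion can reach. A rough sketch of the new call shape, mirroring the test fixtures below (placeholder values; argument meanings inferred from the surrounding diff):

new LogValidator(
    records,                                // MemoryRecords under validation
    topicPartition,
    time,
    CompressionType.NONE,                   // compression of the incoming batch
    Compression.NONE,                       // target compression for the log
    false,
    RecordBatch.MAGIC_VALUE_V2,
    TimestampType.LOG_APPEND_TIME,
    1000L,                                  // timestampBeforeMaxMs
    1000L,                                  // timestampAfterMaxMs
    RecordBatch.NO_PARTITION_LEADER_EPOCH,
    AppendOrigin.CLIENT                     // no trailing MetadataVersion argument anymore
).validateMessagesAndAssignOffsets(
    PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
);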
View File

@@ -21,7 +21,6 @@ import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.compress.Compression;
import org.apache.kafka.common.errors.CorruptRecordException;
import org.apache.kafka.common.errors.InvalidTimestampException;
import org.apache.kafka.common.errors.UnsupportedCompressionTypeException;
import org.apache.kafka.common.errors.UnsupportedForMessageFormatException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.ControlRecordType;
@@ -38,7 +37,6 @@ import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.PrimitiveRef;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.server.common.MetadataVersion;
import org.apache.kafka.server.common.RequestLocal;
import org.apache.kafka.server.util.MockTime;
import org.apache.kafka.storage.internals.log.LogValidator.ValidationResult;
@@ -186,8 +184,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
));
@@ -221,8 +218,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
LogValidator.ValidationResult validatedResults = validator.validateMessagesAndAssignOffsets(
@@ -271,8 +267,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
LogValidator.ValidationResult validatedResults = logValidator.validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
@@ -355,8 +350,7 @@ public class LogValidatorTest {
1000L,
1000L,
partitionLeaderEpoch,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L),
metricsRecorder,
@@ -461,8 +455,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PRODUCER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.IBP_2_3_IV1
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier());
}
@@ -547,8 +540,7 @@ public class LogValidatorTest {
1000L,
1000L,
partitionLeaderEpoch,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
LogValidator.ValidationResult validatedResults = validator.validateMessagesAndAssignOffsets(
@@ -636,8 +628,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -668,8 +659,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -700,8 +690,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -743,8 +732,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
@@ -797,8 +785,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
@@ -835,8 +822,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -869,8 +855,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -899,8 +884,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -931,8 +915,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -962,8 +945,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -993,8 +975,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1025,8 +1006,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1057,8 +1037,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1087,8 +1066,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1123,8 +1101,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1160,8 +1137,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1197,8 +1173,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1231,8 +1206,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1257,8 +1231,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.COORDINATOR,
MetadataVersion.latestTesting()
AppendOrigin.COORDINATOR
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1287,8 +1260,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
@@ -1315,8 +1287,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1339,8 +1310,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1364,8 +1334,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1389,8 +1358,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1415,8 +1383,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1442,8 +1409,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
));
@@ -1470,8 +1436,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
));
@@ -1495,8 +1460,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1522,8 +1486,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
@@ -1551,8 +1514,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
));
@@ -1560,31 +1522,6 @@ public class LogValidatorTest {
assertEquals(metricsRecorder.recordInvalidOffsetCount, 1);
}
@Test
public void testZStdCompressedWithUnavailableIBPVersion() {
// The timestamps should be overwritten
MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, 1234L, Compression.NONE);
assertThrows(UnsupportedCompressionTypeException.class, () ->
new LogValidator(
records,
topicPartition,
time,
CompressionType.NONE,
Compression.zstd().build(),
false,
RecordBatch.MAGIC_VALUE_V2,
TimestampType.LOG_APPEND_TIME,
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.IBP_2_0_IV1
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
)
);
}
@Test
public void testInvalidTimestampExceptionHasBatchIndex() {
long now = System.currentTimeMillis();
@@ -1604,8 +1541,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
)
@@ -1691,8 +1627,7 @@ public class LogValidatorTest {
timestampBeforeMaxConfig,
timestampAfterMaxConfig,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
)
@@ -1724,8 +1659,7 @@ public class LogValidatorTest {
timestampBeforeMaxConfig,
timestampAfterMaxConfig,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
)
@@ -1766,8 +1700,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
LogValidator.ValidationResult result = validator.validateMessagesAndAssignOffsets(
@@ -1805,8 +1738,7 @@ public class LogValidatorTest {
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
);
LogValidator.ValidationResult result = validator.validateMessagesAndAssignOffsets(
@@ -1865,8 +1797,7 @@ public class LogValidatorTest {
1000L,
1000L,
partitionLeaderEpoch,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
offsetCounter,
metricsRecorder,
@@ -1934,8 +1865,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0L),
metricsRecorder,
@@ -1961,8 +1891,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -2006,8 +1935,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
@@ -2049,8 +1977,7 @@ public class LogValidatorTest {
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
AppendOrigin.CLIENT
).validateMessagesAndAssignOffsets(
offsetCounter,
metricsRecorder,

View File

@@ -65,7 +65,7 @@ public class ClusterConfigTest {
.setControllerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT)
.setControllerListenerName(ListenerName.normalised("CONTROLLER"))
.setTrustStoreFile(trustStoreFile)
.setMetadataVersion(MetadataVersion.IBP_0_8_0)
.setMetadataVersion(MetadataVersion.IBP_3_0_IV1)
.setServerProperties(Collections.singletonMap("broker", "broker_value"))
.setConsumerProperties(Collections.singletonMap("consumer", "consumer_value"))
.setProducerProperties(Collections.singletonMap("producer", "producer_value"))

View File

@@ -196,7 +196,7 @@ public class FeatureCommand {
);
versionMappingParser.addArgument("--release-version")
.help("The release version to use for the corresponding feature mapping. The minimum is " +
MetadataVersion.IBP_3_0_IV0 + "; the default is " + MetadataVersion.LATEST_PRODUCTION)
MetadataVersion.IBP_3_0_IV1 + "; the default is " + MetadataVersion.LATEST_PRODUCTION)
.action(store());
}
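The argument defined above backs the version-mapping subcommand of the features tool, so the updated help text now advertises 3.0-IV1 as the floor. An illustrative invocation would presumably look like `bin/kafka-features.sh version-mapping --release-version 3.5-IV0` (release version chosen arbitrarily; exact flags may differ).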