KAFKA-15949: Unify metadata.version format in log and error message (#15505)

Log and error messages referred to metadata.version inconsistently, e.g. as "metadata version" or "metadataVersion". Unify the wording as "metadata.version".
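For illustration only, a minimal Java sketch of the wording convention this change applies (the MetadataVersionMessages class below is hypothetical and not part of this patch): messages spell the feature name exactly as it appears in configuration and tooling output, i.e. "metadata.version".

    // Illustrative only: demonstrates the unified wording, not code from the Kafka source tree.
    public final class MetadataVersionMessages {

        // Unified form: always "metadata.version", matching the feature/config name.
        static String unsupportedScram(String minimumVersion) {
            return "SCRAM is only supported in metadata.version " + minimumVersion + " or later.";
        }

        public static void main(String[] args) {
            // Prints: SCRAM is only supported in metadata.version 3.5-IV2 or later.
            System.out.println(unsupportedScram("3.5-IV2"));
        }
    }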

Reviewers: Luke Chen <showuon@gmail.com>
Author: PoAn Yang, 2024-03-26 20:09:29 +08:00 (committed by GitHub)
parent 2d4abb85bf
commit 6f8d4fe26b
21 changed files with 34 additions and 34 deletions


@@ -205,7 +205,7 @@ class ControllerRegistrationManager(
 debug("maybeSendControllerRegistration: cannot register yet because the channel manager has " +
 "not been initialized.")
 } else if (!metadataVersion.isControllerRegistrationSupported) {
-info("maybeSendControllerRegistration: cannot register yet because the metadata version is " +
+info("maybeSendControllerRegistration: cannot register yet because the metadata.version is " +
 s"still $metadataVersion, which does not support KIP-919 controller registration.")
 } else if (pendingRpc) {
 info("maybeSendControllerRegistration: waiting for the previous RPC to complete.")


@@ -2516,7 +2516,7 @@ class KafkaApis(val requestChannel: RequestChannel,
 def ensureInterBrokerVersion(version: MetadataVersion): Unit = {
 if (config.interBrokerProtocolVersion.isLessThan(version))
-throw new UnsupportedVersionException(s"inter.broker.protocol.version: ${config.interBrokerProtocolVersion.version} is less than the required version: ${version.version}")
+throw new UnsupportedVersionException(s"inter.broker.protocol.version: ${config.interBrokerProtocolVersion} is less than the required version: ${version}")
 }
 def handleAddPartitionsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {


@@ -1733,7 +1733,7 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami
 } else {
 warn(s"${KafkaConfig.InterBrokerProtocolVersionProp} is deprecated in KRaft mode as of 3.3 and will only " +
 s"be read when first upgrading from a KRaft prior to 3.3. See kafka-storage.sh help for details on setting " +
-s"the metadata version for a new KRaft cluster.")
+s"the metadata.version for a new KRaft cluster.")
 }
 }
 // In KRaft mode, we pin this value to the minimum KRaft-supported version. This prevents inadvertent usage of


@@ -62,13 +62,13 @@ object StorageTool extends Logging {
 val metadataVersion = getMetadataVersion(namespace,
 Option(config.get.originals.get(KafkaConfig.InterBrokerProtocolVersionProp)).map(_.toString))
 if (!metadataVersion.isKRaftSupported) {
-throw new TerseFailure(s"Must specify a valid KRaft metadata version of at least 3.0.")
+throw new TerseFailure(s"Must specify a valid KRaft metadata.version of at least ${MetadataVersion.IBP_3_0_IV0}.")
 }
 if (!metadataVersion.isProduction) {
 if (config.get.unstableMetadataVersionsEnabled) {
-System.out.println(s"WARNING: using pre-production metadata version $metadataVersion.")
+System.out.println(s"WARNING: using pre-production metadata.version $metadataVersion.")
 } else {
-throw new TerseFailure(s"Metadata version $metadataVersion is not ready for production use yet.")
+throw new TerseFailure(s"The metadata.version $metadataVersion is not ready for production use yet.")
 }
 }
 val metaProperties = new MetaProperties.Builder().
@@ -79,7 +79,7 @@ object StorageTool extends Logging {
 val metadataRecords : ArrayBuffer[ApiMessageAndVersion] = ArrayBuffer()
 getUserScramCredentialRecords(namespace).foreach(userScramCredentialRecords => {
 if (!metadataVersion.isScramSupported) {
-throw new TerseFailure(s"SCRAM is only supported in metadataVersion IBP_3_5_IV2 or later.")
+throw new TerseFailure(s"SCRAM is only supported in metadata.version ${MetadataVersion.IBP_3_5_IV2} or later.")
 }
 for (record <- userScramCredentialRecords) {
 metadataRecords.append(new ApiMessageAndVersion(record, 0.toShort))
@@ -139,7 +139,7 @@ object StorageTool extends Logging {
 action(storeTrue())
 formatParser.addArgument("--release-version", "-r").
 action(store()).
-help(s"A KRaft release version to use for the initial metadata version. The minimum is 3.0, the default is ${MetadataVersion.LATEST_PRODUCTION.version()}")
+help(s"A KRaft release version to use for the initial metadata.version. The minimum is ${MetadataVersion.IBP_3_0_IV0}, the default is ${MetadataVersion.LATEST_PRODUCTION}")
 parser.parseArgsOrFail(args)
 }


@@ -358,7 +358,7 @@ class KRaftClusterTest {
 @Test
 def testCreateClusterInvalidMetadataVersion(): Unit = {
-assertEquals("Bootstrap metadata versions before 3.3-IV0 are not supported. Can't load " +
+assertEquals("Bootstrap metadata.version before 3.3-IV0 are not supported. Can't load " +
 "metadata from testkit", assertThrows(classOf[RuntimeException], () => {
 new KafkaClusterTestKit.Builder(
 new TestKitNodes.Builder().
@@ -963,7 +963,7 @@ class KRaftClusterTest {
 admin.close()
 }
 TestUtils.waitUntilTrue(() => cluster.brokers().get(1).metadataCache.currentImage().features().metadataVersion().equals(MetadataVersion.latestTesting()),
-"Timed out waiting for metadata version update.")
+"Timed out waiting for metadata.version update.")
 } finally {
 cluster.close()
 }


@@ -405,7 +405,7 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest {
 assertEquals(1, results.size)
 checkAllErrorsAlteringCredentials(results, Errors.UNSUPPORTED_VERSION,
 "when altering the credentials on unsupported IBP version")
-assertEquals("The current metadata version does not support SCRAM", results.get(0).errorMessage)
+assertEquals("The current metadata.version does not support SCRAM", results.get(0).errorMessage)
 }
 private def sendAlterUserScramCredentialsRequest(request: AlterUserScramCredentialsRequest, socketServer: SocketServer = adminSocketServer): AlterUserScramCredentialsResponse = {


@@ -333,7 +333,7 @@ Found problem:
 try {
 assertEquals(1, StorageTool.main(args))
 } catch {
-case e: StorageToolTestException => assertEquals(s"SCRAM is only supported in metadataVersion IBP_3_5_IV2 or later.", exitString)
+case e: StorageToolTestException => assertEquals(s"SCRAM is only supported in metadata.version ${MetadataVersion.IBP_3_5_IV2} or later.", exitString)
 } finally {
 Exit.resetExitProcedure()
 }
@@ -428,7 +428,7 @@ Found problem:
 assertEquals("", exitString)
 assertEquals(0, exitStatus)
 } else {
-assertEquals(s"Metadata version ${MetadataVersion.latestTesting().toString} is not ready for " +
+assertEquals(s"The metadata.version ${MetadataVersion.latestTesting().toString} is not ready for " +
 "production use yet.", exitString)
 assertEquals(1, exitStatus)
 }


@@ -397,7 +397,7 @@ public class FeatureControlManager {
 if (record.name().equals(MetadataVersion.FEATURE_NAME)) {
 MetadataVersion mv = MetadataVersion.fromFeatureLevel(record.featureLevel());
 metadataVersion.set(mv);
-log.info("Replayed a FeatureLevelRecord setting metadata version to {}", mv);
+log.info("Replayed a FeatureLevelRecord setting metadata.version to {}", mv);
 } else {
 if (record.featureLevel() == 0) {
 finalizedVersions.remove(record.name());


@@ -108,7 +108,7 @@ public final class QuorumFeatures {
 Map<Integer, ControllerRegistration> controllers
 ) {
 if (!metadataVersion.isMigrationSupported()) {
-return Optional.of("Metadata version too low at " + metadataVersion);
+return Optional.of("The metadata.version too low at " + metadataVersion);
 } else if (!metadataVersion.isControllerRegistrationSupported()) {
 return Optional.empty();
 }


@@ -188,7 +188,7 @@ public class ScramControlManager {
 } else {
 if (!scramIsSupported) {
 userToError.put(deletion.name(), new ApiError(UNSUPPORTED_VERSION,
-"The current metadata version does not support SCRAM"));
+"The current metadata.version does not support SCRAM"));
 } else {
 ApiError error = validateDeletion(deletion);
 if (error.isFailure()) {
@@ -209,7 +209,7 @@ public class ScramControlManager {
 } else {
 if (!scramIsSupported) {
 userToError.put(upsertion.name(), new ApiError(UNSUPPORTED_VERSION,
-"The current metadata version does not support SCRAM"));
+"The current metadata.version does not support SCRAM"));
 } else {
 ApiError error = validateUpsertion(upsertion);
 if (error.isFailure()) {


@@ -26,7 +26,7 @@ public final class MetadataVersionChangeException extends RuntimeException {
 private final MetadataVersionChange change;
 public MetadataVersionChangeException(MetadataVersionChange change) {
-super("The metadata version is changing from " + change.oldVersion() + " to " +
+super("The metadata.version is changing from " + change.oldVersion() + " to " +
 change.newVersion());
 this.change = change;
 }


@@ -33,7 +33,7 @@ public final class UnwritableMetadataException extends RuntimeException {
 String loss
 ) {
 super("Metadata has been lost because the following could not be represented " +
-"in metadata version " + metadataVersion + ": " + loss);
+"in metadata.version " + metadataVersion + ": " + loss);
 this.metadataVersion = metadataVersion;
 this.loss = loss;
 }


@@ -80,7 +80,7 @@ public class BootstrapMetadata {
 ) {
 this.records = Objects.requireNonNull(records);
 if (metadataVersion.isLessThan(MINIMUM_BOOTSTRAP_VERSION)) {
-throw new RuntimeException("Bootstrap metadata versions before " +
+throw new RuntimeException("Bootstrap metadata.version before " +
 MINIMUM_BOOTSTRAP_VERSION + " are not supported. Can't load metadata from " +
 source);
 }


@@ -1257,7 +1257,7 @@ public class QuorumControllerTest {
 QuorumController active = controlEnv.activeController();
 TestUtils.waitForCondition(() ->
 active.featureControl().metadataVersion().equals(MetadataVersion.IBP_3_0_IV1),
-"Failed to get a metadata version of " + MetadataVersion.IBP_3_0_IV1);
+"Failed to get a metadata.version of " + MetadataVersion.IBP_3_0_IV1);
 // The ConfigRecord in our bootstrap should not have been applied, since there
 // were already records present.
 assertEquals(Collections.emptyMap(), active.configurationControl().
@@ -1288,7 +1288,7 @@ public class QuorumControllerTest {
 FinalizedControllerFeatures features = active.finalizedFeatures(ctx).get();
 Optional<Short> metadataVersionOpt = features.get(MetadataVersion.FEATURE_NAME);
 return Optional.of(MetadataVersion.IBP_3_3_IV1.featureLevel()).equals(metadataVersionOpt);
-}, "Failed to see expected metadata version from bootstrap metadata");
+}, "Failed to see expected metadata.version from bootstrap metadata");
 TestUtils.waitForCondition(() -> {
 ConfigResource defaultBrokerResource = new ConfigResource(BROKER, "");


@@ -95,7 +95,7 @@ public class QuorumFeaturesTest {
 @Test
 public void testZkMigrationNotReadyIfMetadataVersionTooLow() {
-assertEquals(Optional.of("Metadata version too low at 3.0-IV1"),
+assertEquals(Optional.of("The metadata.version too low at 3.0-IV1"),
 QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady(
 MetadataVersion.IBP_3_0_IV1, Collections.emptyMap()));
 }


@@ -50,11 +50,11 @@ public class MetadataVersionChangeTest {
 @Test
 public void testMetadataVersionChangeExceptionToString() {
-assertEquals("org.apache.kafka.image.MetadataVersionChangeException: The metadata " +
-"version is changing from 3.0-IV1 to 3.3-IV0",
+assertEquals("org.apache.kafka.image.MetadataVersionChangeException: The metadata.version " +
+"is changing from 3.0-IV1 to 3.3-IV0",
 new MetadataVersionChangeException(CHANGE_3_0_IV1_TO_3_3_IV0).toString());
-assertEquals("org.apache.kafka.image.MetadataVersionChangeException: The metadata " +
-"version is changing from 3.3-IV0 to 3.0-IV1",
+assertEquals("org.apache.kafka.image.MetadataVersionChangeException: The metadata.version " +
+"is changing from 3.3-IV0 to 3.0-IV1",
 new MetadataVersionChangeException(CHANGE_3_3_IV0_TO_3_0_IV1).toString());
 }
 }


@@ -61,7 +61,7 @@ public class ImageWriterOptionsTest {
 i < MetadataVersion.VERSIONS.length;
 i++) {
 MetadataVersion version = MetadataVersion.VERSIONS[i];
-String formattedMessage = String.format("Metadata has been lost because the following could not be represented in metadata version %s: %s", version, expectedMessage);
+String formattedMessage = String.format("Metadata has been lost because the following could not be represented in metadata.version %s: %s", version, expectedMessage);
 Consumer<UnwritableMetadataException> customLossHandler = e -> assertEquals(formattedMessage, e.getMessage());
 ImageWriterOptions options = new ImageWriterOptions.Builder()
 .setMetadataVersion(version)


@@ -85,7 +85,7 @@ public class BootstrapMetadataTest {
 public void testFromRecordsListWithOldMetadataVersion() {
 RuntimeException exception = assertThrows(RuntimeException.class,
 () -> BootstrapMetadata.fromRecords(RECORDS_WITH_OLD_METADATA_VERSION, "quux"));
-assertEquals("Bootstrap metadata versions before 3.3-IV0 are not supported. Can't load " +
+assertEquals("Bootstrap metadata.version before 3.3-IV0 are not supported. Can't load " +
 "metadata from quux", exception.getMessage());
 }
 }


@@ -563,7 +563,7 @@ public enum MetadataVersion {
 return metadataVersion;
 }
 }
-throw new IllegalArgumentException("No MetadataVersion with metadata version " + version);
+throw new IllegalArgumentException("No MetadataVersion with feature level " + version);
 }
 /**


@@ -155,7 +155,7 @@ public final class AssignorConfiguration {
 switch (UpgradeFromValues.getValueFromString(upgradeFrom)) {
 case UPGRADE_FROM_0100:
 log.info(
-"Downgrading metadata version from {} to 1 for upgrade from 0.10.0.x.",
+"Downgrading metadata.version from {} to 1 for upgrade from 0.10.0.x.",
 LATEST_SUPPORTED_VERSION
 );
 return 1;
@@ -165,7 +165,7 @@
 case UPGRADE_FROM_10:
 case UPGRADE_FROM_11:
 log.info(
-"Downgrading metadata version from {} to 2 for upgrade from {}.x.",
+"Downgrading metadata.version from {} to 2 for upgrade from {}.x.",
 LATEST_SUPPORTED_VERSION,
 upgradeFrom
 );


@@ -242,8 +242,8 @@ public class FeatureCommand {
 try {
 version = MetadataVersion.fromVersionString(metadata);
 } catch (Throwable e) {
-throw new TerseException("Unsupported metadata version " + metadata +
-". Supported metadata versions are " + metadataVersionsToString(
+throw new TerseException("Unsupported metadata.version " + metadata +
+". Supported metadata.version are " + metadataVersionsToString(
 MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, MetadataVersion.latestProduction()));
 }
 updates.put(MetadataVersion.FEATURE_NAME, new FeatureUpdate(version.featureLevel(), upgradeType));