KAFKA-15098 Allow authorizers to be configured in ZK migration (#13895)

Reviewers: Ron Dagostino <rdagostino@confluent.io>
This commit is contained in:
David Arthur 2023-06-22 09:34:49 -04:00 committed by GitHub
parent a81486e4f8
commit 1bf7039999
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 59 additions and 10 deletions

View File

@ -2305,8 +2305,6 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami
} else { } else {
// ZK-based // ZK-based
if (migrationEnabled) { if (migrationEnabled) {
require(!originals.containsKey(KafkaConfig.AuthorizerClassNameProp),
s"ZooKeeper migration does not yet support authorizers. Remove ${KafkaConfig.AuthorizerClassNameProp} before performing a migration.")
validateNonEmptyQuorumVotersForMigration() validateNonEmptyQuorumVotersForMigration()
require(controllerListenerNames.nonEmpty, require(controllerListenerNames.nonEmpty,
s"${KafkaConfig.ControllerListenerNamesProp} must not be empty when running in ZooKeeper migration mode: ${controllerListenerNames.asJava}") s"${KafkaConfig.ControllerListenerNamesProp} must not be empty when running in ZooKeeper migration mode: ${controllerListenerNames.asJava}")

View File

@ -121,6 +121,60 @@ class ZkMigrationIntegrationTest {
} }
} }
// Regression test for KAFKA-15098: a ZK-mode broker that has an authorizer
// configured (here AclAuthorizer) must be able to restart with ZK-to-KRaft
// migration enabled. Before this change, KafkaConfig rejected
// authorizer.class.name when migration mode was on.
@ClusterTest(
brokers = 3, clusterType = Type.ZK, autoStart = AutoStart.YES,
metadataVersion = MetadataVersion.IBP_3_4_IV0,
serverProperties = Array(
new ClusterConfigProperty(key = "authorizer.class.name", value = "kafka.security.authorizer.AclAuthorizer"),
new ClusterConfigProperty(key = "super.users", value = "User:ANONYMOUS"),
new ClusterConfigProperty(key = "inter.broker.listener.name", value = "EXTERNAL"),
new ClusterConfigProperty(key = "listeners", value = "PLAINTEXT://localhost:0,EXTERNAL://localhost:0"),
new ClusterConfigProperty(key = "advertised.listeners", value = "PLAINTEXT://localhost:0,EXTERNAL://localhost:0"),
new ClusterConfigProperty(key = "listener.security.protocol.map", value = "EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT")
)
)
def testStartZkBrokerWithAuthorizer(zkCluster: ClusterInstance): Unit = {
// Bootstrap the ZK cluster ID into KRaft
val clusterId = zkCluster.clusterId()
// Stand up a single KRaft controller (no brokers) that reuses the ZK
// cluster's ID and points at the ZK ensemble, with migration enabled.
val kraftCluster = new KafkaClusterTestKit.Builder(
new TestKitNodes.Builder().
setBootstrapMetadataVersion(MetadataVersion.IBP_3_4_IV0).
setClusterId(Uuid.fromString(clusterId)).
setNumBrokerNodes(0).
setNumControllerNodes(1).build())
.setConfigProp(KafkaConfig.MigrationEnabledProp, "true")
.setConfigProp(KafkaConfig.ZkConnectProp, zkCluster.asInstanceOf[ZkClusterInstance].getUnderlying.zkConnect)
.build()
try {
kraftCluster.format()
kraftCluster.startup()
// Future that completes once all 3 ZK brokers have registered with the
// KRaft controller; created before the restart so no registration is missed.
val readyFuture = kraftCluster.controllers().values().asScala.head.controller.waitForReadyBrokers(3)
// Enable migration configs and restart brokers
log.info("Restart brokers in migration mode")
val clientProps = kraftCluster.controllerClientProperties()
val voters = clientProps.get(RaftConfig.QUORUM_VOTERS_CONFIG)
// Rewrite the ZK brokers' server properties so the restarted brokers run
// in migration mode and can reach the KRaft controller quorum.
zkCluster.config().serverProperties().put(KafkaConfig.MigrationEnabledProp, "true")
zkCluster.config().serverProperties().put(RaftConfig.QUORUM_VOTERS_CONFIG, voters)
zkCluster.config().serverProperties().put(KafkaConfig.ControllerListenerNamesProp, "CONTROLLER")
zkCluster.config().serverProperties().put(KafkaConfig.ListenerSecurityProtocolMapProp, "CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT")
zkCluster.rollingBrokerRestart() // This would throw if authorizers weren't allowed
zkCluster.waitForReadyBrokers()
readyFuture.get(30, TimeUnit.SECONDS)
// The KRaft controller claims ZK controllership once migration proceeds;
// 3000 is the controller's node id here — TODO confirm it matches TestKitNodes' default.
val zkClient = zkCluster.asInstanceOf[ZkClusterInstance].getUnderlying().zkClient
TestUtils.waitUntilTrue(() => zkClient.getControllerId.contains(3000), "Timed out waiting for KRaft controller to take over")
// Migration is considered progressed once the driver reaches a state that
// permits dual-write (writing metadata to both KRaft and ZK).
def inDualWrite(): Boolean = {
val migrationState = kraftCluster.controllers().get(3000).migrationSupport.get.migrationDriver.migrationState().get(10, TimeUnit.SECONDS)
migrationState.allowDualWrite()
}
TestUtils.waitUntilTrue(() => inDualWrite(), "Timed out waiting for dual-write mode")
} finally {
// Shut down both clusters even if assertions above failed.
shutdownInSequence(zkCluster, kraftCluster)
}
}
/** /**
* Test ZkMigrationClient against a real ZooKeeper-backed Kafka cluster. This test creates a ZK cluster * Test ZkMigrationClient against a real ZooKeeper-backed Kafka cluster. This test creates a ZK cluster
* and modifies data using AdminClient. The ZkMigrationClient is then used to read the metadata from ZK * and modifies data using AdminClient. The ZkMigrationClient is then used to read the metadata from ZK

View File

@ -1714,12 +1714,9 @@ class KafkaConfigTest {
// All needed configs are now set // All needed configs are now set
KafkaConfig.fromProps(props) KafkaConfig.fromProps(props)
// Don't allow migration startup with an authorizer set // Check that we allow authorizer to be set
props.setProperty(KafkaConfig.AuthorizerClassNameProp, classOf[AclAuthorizer].getCanonicalName) props.setProperty(KafkaConfig.AuthorizerClassNameProp, classOf[AclAuthorizer].getCanonicalName)
assertEquals( KafkaConfig.fromProps(props)
"requirement failed: ZooKeeper migration does not yet support authorizers. Remove authorizer.class.name before performing a migration.",
assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage)
props.remove(KafkaConfig.AuthorizerClassNameProp)
// Don't allow migration startup with an older IBP // Don't allow migration startup with an older IBP
props.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, MetadataVersion.IBP_3_3_IV0.version()) props.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, MetadataVersion.IBP_3_3_IV0.version())

View File

@ -145,7 +145,7 @@ public class KRaftMigrationDriver implements MetadataPublisher {
} }
// Visible for testing // Visible for testing
CompletableFuture<MigrationDriverState> migrationState() { public CompletableFuture<MigrationDriverState> migrationState() {
CompletableFuture<MigrationDriverState> stateFuture = new CompletableFuture<>(); CompletableFuture<MigrationDriverState> stateFuture = new CompletableFuture<>();
eventQueue.append(() -> stateFuture.complete(migrationState)); eventQueue.append(() -> stateFuture.complete(migrationState));
return stateFuture; return stateFuture;
@ -328,7 +328,7 @@ public class KRaftMigrationDriver implements MetadataPublisher {
/** /**
* Construct and enqueue a {@link MetadataChangeEvent} with a given completion handler. In production use cases, * Construct and enqueue a {@link MetadataChangeEvent} with a given completion handler. In production use cases,
* this handler is a no-op. This method exists so we can add additional logic in our unit tests to wait for the * this handler is a no-op. This method exists so that we can add additional logic in our unit tests to wait for the
* enqueued event to finish executing. * enqueued event to finish executing.
*/ */
void enqueueMetadataChangeEvent( void enqueueMetadataChangeEvent(

View File

@ -55,7 +55,7 @@ public enum MigrationDriverState {
this.allowDualWrite = allowDualWrite; this.allowDualWrite = allowDualWrite;
} }
boolean allowDualWrite() { public boolean allowDualWrite() {
return allowDualWrite; return allowDualWrite;
} }
} }