mirror of https://github.com/apache/kafka.git
MINOR: Remove ZooKeeper mentions in comments (#18646)
Reviewers: Mickael Maison <mickael.maison@gmail.com>
parent 04567cdb22
commit 5631be20a6
@@ -58,9 +58,8 @@ import scala.jdk.CollectionConverters._
  * KIP-412 added support for changing log4j log levels via IncrementalAlterConfigs, but
  * not via the original AlterConfigs. In retrospect, this would have been better off as a
  * separate RPC, since the semantics are quite different. In particular, KIP-226 configs
- * are stored durably (in ZK or KRaft) and persist across broker restarts, but KIP-412
- * log4j levels do not. However, we have to handle it here now in order to maintain
- * compatibility.
+ * are stored durably and persist across broker restarts, but KIP-412 log4j levels do not.
+ * However, we have to handle it here now in order to maintain compatibility.
  *
  * Configuration processing is split into two parts.
  * - The first step, called "preprocessing," handles setting KIP-412 log levels, validating
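For readers unfamiliar with the KIP-412 path this comment describes: log4j levels are changed through the Admin client's incrementalAlterConfigs call against a BROKER_LOGGER resource. A minimal sketch, not part of this commit; the bootstrap address, broker id, and logger name are assumptions:

// Sketch: changing a broker's log4j level via IncrementalAlterConfigs (KIP-412).
// Assumptions: bootstrap address, broker id "0", and the logger name.
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class LoggerLevelExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // BROKER_LOGGER addresses one broker's log4j loggers by broker id.
            ConfigResource loggers = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");
            AlterConfigOp setDebug = new AlterConfigOp(
                new ConfigEntry("kafka.server.KafkaApis", "DEBUG"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(loggers, List.of(setDebug))).all().get();
        }
    }
}

Because these levels are not stored durably, the change is lost on broker restart, which is exactly the asymmetry with KIP-226 configs that the comment calls out.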
@@ -69,14 +68,10 @@ import scala.jdk.CollectionConverters._
  * - The second step is "persistence," and handles storing the configurations durably to our
  *   metadata store.
  *
- * When KIP-590 forwarding is active (such as in KRaft mode), preprocessing will happen
- * on the broker, while persistence will happen on the active controller. (If KIP-590
- * forwarding is not active, then both steps are done on the same broker.)
- *
- * In KRaft mode, the active controller performs its own configuration validation step in
+ * The active controller performs its own configuration validation step in
  * [[kafka.server.ControllerConfigurationValidator]]. This is mainly important for
  * TOPIC resources, since we already validated changes to BROKER resources on the
- * forwarding broker. The KRaft controller is also responsible for enforcing the configured
+ * forwarding broker. The controller is also responsible for enforcing the configured
  * [[org.apache.kafka.server.policy.AlterConfigPolicy]].
  */
 class ConfigAdminManager(nodeId: Int,
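The scaladoc's closing reference is to the pluggable validation hook that the controller enforces. A hedged sketch of an implementation, with an invented rule purely for illustration:

// Sketch of an org.apache.kafka.server.policy.AlterConfigPolicy implementation.
// The min.insync.replicas rule below is hypothetical, not from this commit.
import java.util.Map;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.PolicyViolationException;
import org.apache.kafka.server.policy.AlterConfigPolicy;

public class ExampleAlterConfigPolicy implements AlterConfigPolicy {
    @Override
    public void configure(Map<String, ?> configs) {}

    @Override
    public void validate(RequestMetadata requestMetadata) throws PolicyViolationException {
        // Hypothetical rule: forbid lowering min.insync.replicas below 2 on topics.
        if (requestMetadata.resource().type() == ConfigResource.Type.TOPIC) {
            String misr = requestMetadata.configs().get("min.insync.replicas");
            if (misr != null && Integer.parseInt(misr) < 2) {
                throw new PolicyViolationException("min.insync.replicas must be >= 2");
            }
        }
    }

    @Override
    public void close() {}
}

A policy like this would be registered through the broker's alter.config.policy.class.name setting.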
@@ -336,12 +336,12 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging
   }
 
   /**
-   * All config updates through ZooKeeper are triggered through actual changes in values stored in ZooKeeper.
+   * Config updates are triggered through actual changes in stored values.
    * For some configs like SSL keystores and truststores, we also want to reload the store if it was modified
-   * in-place, even though the actual value of the file path and password haven't changed. This scenario alone
-   * is handled here when a config update request using admin client is processed by ZkAdminManager. If any of
-   * the SSL configs have changed, then the update will not be done here, but will be handled later when ZK
-   * changes are processed. At the moment, only listener configs are considered for reloading.
+   * in-place, even though the actual value of the file path and password haven't changed. This scenario is
+   * handled when a config update request using admin client is processed by the AdminManager. If any of
+   * the SSL configs have changed, then the update will be handled when configuration changes are processed.
+   * At the moment, only listener configs are considered for reloading.
    */
   private[server] def reloadUpdatedFilesWithoutConfigChange(newProps: Properties): Unit = CoreUtils.inWriteLock(lock) {
     reconfigurables.forEach(r => {
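The reload path documented here is what enables in-place keystore updates: after the file is replaced on disk, submitting the unchanged location causes the broker to reopen the store. A sketch, assuming broker id 0, a listener named "external", and a placeholder path:

// Sketch of the in-place keystore reload described above (not from this commit).
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class KeystoreReloadExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
            // The keystore file was replaced on disk; path and password are unchanged.
            // Re-submitting the same location prompts the broker to reopen the store.
            AlterConfigOp touch = new AlterConfigOp(
                new ConfigEntry("listener.name.external.ssl.keystore.location",
                    "/etc/kafka/ssl/keystore.jks"),
                AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(Map.of(broker, List.of(touch))).all().get();
        }
    }
}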
@@ -173,11 +173,9 @@ public class ConfigCommandIntegrationTest {
         alterConfigWithAdmin(client, Optional.of(defaultBrokerId),
                 singletonMap("listener.name.external.ssl.keystore.password", "secret"), alterOpts);
 
-        // Password config update with encoder secret should succeed and encoded password must be stored in ZK
         Map<String, String> configs = new HashMap<>();
         configs.put("listener.name.external.ssl.keystore.password", "secret");
         configs.put("log.cleaner.threads", "2");
-        // Password encoder configs
 
         // Password config update at default cluster-level should fail
         assertThrows(ExecutionException.class,
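For context on the assertion in this hunk: password configs can be altered per broker, as in the alterConfigWithAdmin call above, but not at the cluster-wide default. A hedged sketch of the same expectation against the Admin API; the class, helper name, and listener name are illustrative:

// Sketch of the expectation the test encodes: a default cluster-level password
// update is rejected, so the future completes with an ExecutionException.
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class ClusterLevelPasswordExample {
    static void expectClusterLevelPasswordToFail(Admin admin) {
        // An empty entity name addresses the cluster-wide default for brokers.
        ConfigResource clusterDefault = new ConfigResource(ConfigResource.Type.BROKER, "");
        AlterConfigOp setPassword = new AlterConfigOp(
            new ConfigEntry("listener.name.external.ssl.keystore.password", "secret"),
            AlterConfigOp.OpType.SET);
        assertThrows(ExecutionException.class, () ->
            admin.incrementalAlterConfigs(Map.of(clusterDefault, List.of(setPassword)))
                .all().get());
    }
}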
@@ -382,7 +380,6 @@ public class ConfigCommandIntegrationTest {
     @ClusterTest(
         // Must be at greater than 1MB per cleaner thread, set to 2M+2 so that we can set 2 cleaner threads.
         serverProperties = {@ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "2097154")},
-        // Zk code has been removed, use kraft and mockito to mock this situation
         metadataVersion = MetadataVersion.IBP_3_3_IV0
     )
     public void testUnsupportedVersionException() {
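On the buffer sizing in the retained comment: the dedupe buffer is shared across cleaner threads, and "2M+2" is 2 * 1024 * 1024 + 2 = 2097154 bytes, leaving 2097154 / 2 = 1048577 bytes per thread, just over the 1MB-per-thread minimum the comment cites. That headroom is what lets the earlier test set log.cleaner.threads to 2.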