mirror of https://github.com/apache/kafka.git
When writing HTML, it's recommended to use the <code> element instead of backticks for inline code formatting.

Reviewers: Chia-Ping Tsai <chia7712@gmail.com>, TengYao Chi <frankvicky@apache.org>
commit d31885d33c
parent 9df616da76
@@ -376,7 +376,7 @@ public class ConsumerConfig extends AbstractConfig {
     public static final String ALLOW_AUTO_CREATE_TOPICS_CONFIG = "allow.auto.create.topics";
     private static final String ALLOW_AUTO_CREATE_TOPICS_DOC = "Allow automatic topic creation on the broker when" +
         " subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the" +
-        " broker allows for it using `auto.create.topics.enable` broker configuration.";
+        " broker allows for it using <code>auto.create.topics.enable</code> broker configuration.";
     public static final boolean DEFAULT_ALLOW_AUTO_CREATE_TOPICS = true;

     /**
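For context (not part of this commit), a minimal sketch of the consumer-side setting documented in this hunk; the bootstrap address and group id are placeholders:

    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;

    // Sketch: disable client-side auto topic creation. Topics are only ever
    // created if the broker-side auto.create.topics.enable also permits it.
    public class AutoCreateTopicsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
            props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
            System.out.println(props);
        }
    }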
@@ -34,7 +34,7 @@ public class SslConfigs {
         + "this config are dependent on the JVM. "
         + "Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if "
         + "the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even "
-        + "if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'.";
+        + "if it is one of the values in <code>ssl.enabled.protocols</code> and the server only supports 'TLSv1.3'.";

     public static final String DEFAULT_SSL_PROTOCOL = "TLSv1.3";
@@ -49,12 +49,12 @@ public class SslConfigs {
     public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. "
         + "The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it "
         + "and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use "
-        + "cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior.";
+        + "cases. Also see the config documentation for <code>ssl.protocol</code> to understand how it can impact the TLS version negotiation behavior.";
     public static final String DEFAULT_SSL_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.3";

     public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type";
     public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. "
-        + "This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].";
+        + "This is optional for client. The values currently supported by the default <code>ssl.engine.factory.class</code> are [JKS, PKCS12, PEM].";
     public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS";

     public static final String SSL_KEYSTORE_KEY_CONFIG = "ssl.keystore.key";
@@ -84,7 +84,7 @@ public class SslConfigs {
         + "the PEM key specified in 'ssl.keystore.key'.";

     public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type";
-    public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM].";
+    public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. The values currently supported by the default <code>ssl.engine.factory.class</code> are [JKS, PKCS12, PEM].";
     public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS";

     public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location";
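For context (not part of this commit), a sketch wiring the SSL settings these docs describe into a client configuration; the values shown are the documented defaults and the truststore path is a placeholder:

    import java.util.Properties;
    import org.apache.kafka.common.config.SslConfigs;

    // Sketch: a TLS client that prefers TLSv1.3 and falls back to TLSv1.2,
    // matching the defaults documented in the hunks above.
    public class SslClientPropsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("security.protocol", "SSL");
            props.put(SslConfigs.SSL_PROTOCOL_CONFIG, SslConfigs.DEFAULT_SSL_PROTOCOL);                   // "TLSv1.3"
            props.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS); // "TLSv1.2,TLSv1.3"
            props.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS");
            props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/path/to/client.truststore.jks");       // placeholder
            System.out.println(props);
        }
    }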
@@ -21,7 +21,7 @@ package org.apache.kafka.common.config;
  * <p>Keys that can be used to configure a topic. These keys are useful when creating or reconfiguring a
  * topic using the AdminClient.
  *
- * <p>The intended pattern is for broker configs to include a <code>`log.`</code> prefix. For example, to set the default broker
+ * <p>The intended pattern is for broker configs to include a <code>log.</code> prefix. For example, to set the default broker
  * cleanup policy, one would set <code>log.cleanup.policy</code> instead of <code>cleanup.policy</code>. Unfortunately, there are many cases
  * where this pattern is not followed.
  */
@@ -87,13 +87,13 @@ public class TopicConfig {

     public static final String LOCAL_LOG_RETENTION_MS_CONFIG = "local.retention.ms";
     public static final String LOCAL_LOG_RETENTION_MS_DOC = "The number of milliseconds to keep the local log segment before it gets deleted. " +
-        "Default value is -2, it represents `retention.ms` value is to be used. The effective value should always be less than or equal " +
-        "to `retention.ms` value.";
+        "Default value is -2, it represents <code>retention.ms</code> value is to be used. The effective value should always be less than or equal " +
+        "to <code>retention.ms</code> value.";

     public static final String LOCAL_LOG_RETENTION_BYTES_CONFIG = "local.retention.bytes";
     public static final String LOCAL_LOG_RETENTION_BYTES_DOC = "The maximum size of local log segments that can grow for a partition before it " +
-        "deletes the old segments. Default value is -2, it represents `retention.bytes` value to be used. The effective value should always be " +
-        "less than or equal to `retention.bytes` value.";
+        "deletes the old segments. Default value is -2, it represents <code>retention.bytes</code> value to be used. The effective value should always be " +
+        "less than or equal to <code>retention.bytes</code> value.";

     public static final String REMOTE_LOG_COPY_DISABLE_CONFIG = "remote.log.copy.disable";
     public static final String REMOTE_LOG_COPY_DISABLE_DOC = "Determines whether tiered data for a topic should become read only," +
@@ -104,7 +104,7 @@ public class TopicConfig {
     public static final String REMOTE_LOG_DELETE_ON_DISABLE_CONFIG = "remote.log.delete.on.disable";
     public static final String REMOTE_LOG_DELETE_ON_DISABLE_DOC = "Determines whether tiered data for a topic should be " +
         "deleted after tiered storage is disabled on a topic. This configuration should be enabled when trying to " +
-        "set `remote.storage.enable` from true to false";
+        "set <code>remote.storage.enable</code> from true to false";

     public static final String MAX_MESSAGE_BYTES_CONFIG = "max.message.bytes";
     public static final String MAX_MESSAGE_BYTES_DOC =
@@ -165,7 +165,7 @@ public class TopicConfig {
     public static final String UNCLEAN_LEADER_ELECTION_ENABLE_DOC = "Indicates whether to enable replicas " +
         "not in the ISR set to be elected as leader as a last resort, even though doing so may result in data " +
         "loss.<p>Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader election" +
-        "thread to trigger election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with `unclean` option " +
+        "thread to trigger election periodically (default is 5 minutes). Please run <code>kafka-leader-election.sh</code> with <code>unclean</code> option " +
         "to trigger the unclean leader election immediately if needed.</p>";

     public static final String MIN_IN_SYNC_REPLICAS_CONFIG = "min.insync.replicas";
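For context (not part of this commit), a sketch creating a tiered-storage topic with the local-retention settings documented above; the topic name, sizing, and bootstrap address are placeholders:

    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.NewTopic;
    import org.apache.kafka.common.config.TopicConfig;

    // Sketch: local.retention.ms bounds how long segments stay on local disk;
    // if left at -2 it falls back to retention.ms, per the doc above.
    public class TieredTopicSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder
            try (Admin admin = Admin.create(props)) {
                NewTopic topic = new NewTopic("tiered-demo", 3, (short) 3)
                    .configs(Map.of(
                        "remote.storage.enable", "true",
                        TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "3600000", // 1h locally
                        TopicConfig.RETENTION_MS_CONFIG, "604800000"));        // 7d total
                admin.createTopics(List.of(topic)).all().get();
            }
        }
    }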
@@ -160,9 +160,9 @@ public class WorkerConfig extends AbstractConfig {
     public static final String CONNECTOR_CLIENT_POLICY_CLASS_CONFIG = "connector.client.config.override.policy";
     public static final String CONNECTOR_CLIENT_POLICY_CLASS_DOC =
         "Class name or alias of implementation of <code>ConnectorClientConfigOverridePolicy</code>. Defines what client configurations can be "
-        + "overridden by the connector. The default implementation is `All`, meaning connector configurations can override all client properties. "
-        + "The other possible policies in the framework include `None` to disallow connectors from overriding client properties, "
-        + "and `Principal` to allow connectors to override only client principals.";
+        + "overridden by the connector. The default implementation is <code>All</code>, meaning connector configurations can override all client properties. "
+        + "The other possible policies in the framework include <code>None</code> to disallow connectors from overriding client properties, "
+        + "and <code>Principal</code> to allow connectors to override only client principals.";
     public static final String CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT = "All";

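For context (not part of this commit), a sketch of the override mechanism this policy governs; the connector name and override value are illustrative:

    import java.util.Map;

    // Sketch: with connector.client.config.override.policy at its default
    // ("All"), a connector may override client properties through the
    // producer.override./consumer.override. prefixes; under "None" the same
    // config would be rejected at validation time.
    public class OverridePolicySketch {
        public static void main(String[] args) {
            Map<String, String> connectorConfig = Map.of(
                "name", "demo-sink",                                                  // placeholder
                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
                "file", "/tmp/demo-sink.txt",
                "topics", "demo",
                "consumer.override.max.poll.records", "100");
            System.out.println(connectorConfig);
        }
    }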
@@ -187,7 +187,7 @@ public class WorkerConfig extends AbstractConfig {
     public static final String TOPIC_CREATION_ENABLE_CONFIG = "topic.creation.enable";
     protected static final String TOPIC_CREATION_ENABLE_DOC = "Whether to allow "
         + "automatic creation of topics used by source connectors, when source connectors "
-        + "are configured with `" + TOPIC_CREATION_PREFIX + "` properties. Each task will use an "
+        + "are configured with <code>" + TOPIC_CREATION_PREFIX + "</code> properties. Each task will use an "
         + "admin client to create its topics and will not depend on the Kafka brokers "
        + "to create topics automatically.";
     protected static final boolean TOPIC_CREATION_ENABLE_DEFAULT = true;
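For context (not part of this commit), a sketch of a source connector opting into topic creation via the topic.creation.* properties the doc refers to; all values are illustrative:

    import java.util.Map;

    // Sketch: when topic.creation.enable=true on the worker, these properties
    // let the connector's tasks create "demo-out" themselves instead of
    // relying on broker-side auto-creation.
    public class TopicCreationSketch {
        public static void main(String[] args) {
            Map<String, String> sourceConfig = Map.of(
                "name", "demo-source",                                                 // placeholder
                "connector.class", "org.apache.kafka.connect.file.FileStreamSourceConnector",
                "file", "/tmp/demo.txt",
                "topic", "demo-out",
                "topic.creation.default.replication.factor", "3",
                "topic.creation.default.partitions", "6");
            System.out.println(sourceConfig);
        }
    }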
@@ -44,13 +44,13 @@ public enum FieldSyntaxVersion {
     public static final String FIELD_SYNTAX_VERSION_CONFIG = "field.syntax.version";
     public static final String FIELD_SYNTAX_VERSION_DOC =
         "Defines the version of the syntax to access fields. "
-        + "If set to `V1`, then the field paths are limited to access the elements at the root level of the struct or map. "
-        + "If set to `V2`, the syntax will support accessing nested elements. "
+        + "If set to <code>V1</code>, then the field paths are limited to access the elements at the root level of the struct or map. "
+        + "If set to <code>V2</code>, the syntax will support accessing nested elements. "
         + "To access nested elements, dotted notation is used. "
         + "If dots are already included in the field name, "
         + "then backtick pairs can be used to wrap field names containing dots. "
-        + "E.g. to access the subfield `baz` from a field named \"foo.bar\" in a struct/map "
-        + "the following format can be used to access its elements: \"`foo.bar`.baz\".";
+        + "E.g. to access the subfield <code>baz</code> from a field named \"foo.bar\" in a struct/map "
+        + "the following format can be used to access its elements: \"<code>foo.bar</code>.baz\".";

     public static final String FIELD_SYNTAX_VERSION_DEFAULT_VALUE = V1.name();

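For context (not part of this commit), a sketch using the V2 syntax documented above; it assumes a transform that supports field.syntax.version (ExtractField is used here purely for illustration):

    import java.util.Map;

    // Sketch: address subfield "baz" under a field literally named "foo.bar",
    // using backtick quoting as the doc describes.
    public class NestedFieldSyntaxSketch {
        public static void main(String[] args) {
            Map<String, String> config = Map.of(
                "transforms", "extract",
                "transforms.extract.type", "org.apache.kafka.connect.transforms.ExtractField$Value",
                "transforms.extract.field.syntax.version", "V2",
                "transforms.extract.field", "`foo.bar`.baz");
            System.out.println(config);
        }
    }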
@@ -4220,7 +4220,7 @@ foo
 implemented for integration test can be used, which will create a temporary directory in local storage to simulate the remote storage.
 </p>

-<p>To adopt the `LocalTieredStorage`, the test library needs to be built locally</p>
+<p>To adopt the <code>LocalTieredStorage</code>, the test library needs to be built locally</p>
 <pre><code class="language-bash"># please checkout to the specific version tag you're using before building it
 # ex: `git checkout {{fullDotVersion}}`
 $ ./gradlew clean :storage:testJar</code></pre>
@@ -118,7 +118,7 @@ public class ReplicationConfigs {
     public static final String UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG = ServerTopicConfigSynonyms.serverSynonym(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG);
     public static final String UNCLEAN_LEADER_ELECTION_ENABLE_DOC = "Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss" +
         "<p>Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader election " +
-        "thread to trigger election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with `unclean` option " +
+        "thread to trigger election periodically (default is 5 minutes). Please run <code>kafka-leader-election.sh</code> with <code>unclean</code> option " +
         "to trigger the unclean leader election immediately if needed.</p>";

     public static final String INTER_BROKER_SECURITY_PROTOCOL_CONFIG = "security.inter.broker.protocol";
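For context (not part of this commit), a sketch of the Admin API counterpart of running kafka-leader-election.sh with the unclean option; the topic/partition and bootstrap address are placeholders:

    import java.util.Properties;
    import java.util.Set;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.common.ElectionType;
    import org.apache.kafka.common.TopicPartition;

    // Sketch: trigger an unclean leader election immediately for one partition
    // instead of waiting for the periodic election thread described above.
    public class UncleanElectionSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder
            try (Admin admin = Admin.create(props)) {
                admin.electLeaders(ElectionType.UNCLEAN, Set.of(new TopicPartition("demo", 0)))
                     .partitions().get();
            }
        }
    }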
@@ -40,7 +40,7 @@ public final class RemoteLogManagerConfig {
      */
     public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP = "remote.log.storage.manager.impl.prefix";
     public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_DOC = "Prefix used for properties to be passed to RemoteStorageManager " +
-        "implementation. For example this value can be `rsm.config.`.";
+        "implementation. For example this value can be <code>rsm.config.</code>.";
     public static final String DEFAULT_REMOTE_STORAGE_MANAGER_CONFIG_PREFIX = "rsm.config.";

     /**
@@ -49,7 +49,7 @@ public final class RemoteLogManagerConfig {
      */
     public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP = "remote.log.metadata.manager.impl.prefix";
     public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_DOC = "Prefix used for properties to be passed to RemoteLogMetadataManager " +
-        "implementation. For example this value can be `rlmm.config.`.";
+        "implementation. For example this value can be <code>rlmm.config.</code>.";
     public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX = "rlmm.config.";

     public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = "remote.log.storage.system.enable";
@@ -58,20 +58,20 @@ public final class RemoteLogManagerConfig {
     public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = false;

     public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = "remote.log.storage.manager.class.name";
-    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully qualified class name of `RemoteStorageManager` implementation.";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully qualified class name of <code>RemoteStorageManager</code> implementation.";

     public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = "remote.log.storage.manager.class.path";
-    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class path of the `RemoteStorageManager` implementation. " +
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class path of the <code>RemoteStorageManager</code> implementation. " +
         "If specified, the RemoteStorageManager implementation and its dependent libraries will be loaded by a dedicated " +
         "classloader which searches this class path before the Kafka broker class path. The syntax of this parameter is same " +
         "as the standard Java class path string.";

     public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP = "remote.log.metadata.manager.class.name";
-    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = "Fully qualified class name of `RemoteLogMetadataManager` implementation.";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = "Fully qualified class name of <code>RemoteLogMetadataManager</code> implementation.";
     public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CLASS_NAME = "org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager";

     public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP = "remote.log.metadata.manager.class.path";
-    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = "Class path of the `RemoteLogMetadataManager` implementation. " +
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = "Class path of the <code>RemoteLogMetadataManager</code> implementation. " +
         "If specified, the RemoteLogMetadataManager implementation and its dependent libraries will be loaded by a dedicated " +
         "classloader which searches this class path before the Kafka broker class path. The syntax of this parameter is same " +
         "as the standard Java class path string.";
@@ -149,14 +149,14 @@ public final class RemoteLogManagerConfig {

     public static final String LOG_LOCAL_RETENTION_MS_PROP = "log.local.retention.ms";
     public static final String LOG_LOCAL_RETENTION_MS_DOC = "The number of milliseconds to keep the local log segments before it gets eligible for deletion. " +
-        "Default value is -2, it represents `log.retention.ms` value is to be used. The effective value should always be less than or equal " +
-        "to `log.retention.ms` value.";
+        "Default value is -2, it represents <code>log.retention.ms</code> value is to be used. The effective value should always be less than or equal " +
+        "to <code>log.retention.ms</code> value.";
     public static final Long DEFAULT_LOG_LOCAL_RETENTION_MS = -2L;

     public static final String LOG_LOCAL_RETENTION_BYTES_PROP = "log.local.retention.bytes";
     public static final String LOG_LOCAL_RETENTION_BYTES_DOC = "The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. " +
-        "Default value is -2, it represents `log.retention.bytes` value to be used. The effective value should always be " +
-        "less than or equal to `log.retention.bytes` value.";
+        "Default value is -2, it represents <code>log.retention.bytes</code> value to be used. The effective value should always be " +
+        "less than or equal to <code>log.retention.bytes</code> value.";
     public static final Long DEFAULT_LOG_LOCAL_RETENTION_BYTES = -2L;

     public static final String REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP = "remote.log.manager.copy.max.bytes.per.second";
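For context (not part of this commit), a sketch combining the prefix, class-name, and local-retention configs documented in these hunks into broker-side settings; the RSM class and its bucket property are placeholders, while the metadata manager is the default named in the diff:

    import java.util.Properties;

    // Sketch: minimal tiered-storage broker configuration. Properties under
    // the rsm.config. prefix are stripped of the prefix and handed to the
    // RemoteStorageManager implementation.
    public class RemoteStorageBrokerPropsSketch {
        public static void main(String[] args) {
            Properties broker = new Properties();
            broker.put("remote.log.storage.system.enable", "true");
            broker.put("remote.log.storage.manager.class.name", "com.example.MyRemoteStorageManager"); // placeholder
            broker.put("remote.log.storage.manager.impl.prefix", "rsm.config.");
            broker.put("rsm.config.bucket", "my-bucket"); // placeholder, passed through to the RSM
            broker.put("remote.log.metadata.manager.class.name",
                "org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager");
            broker.put("log.local.retention.ms", "-2"); // fall back to log.retention.ms
            System.out.println(broker);
        }
    }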
@@ -647,9 +647,9 @@ public class StreamsConfig extends AbstractConfig {
         " the task available on one instance while it is warming up on another instance it has been reassigned to. Used to throttle how much extra broker " +
         " traffic and cluster state can be used for high availability. Must be at least 1." +
         "Note that one warmup replica corresponds to one Stream Task. Furthermore, note that each warmup replica can only be promoted to an active task " +
-        "during a rebalance (normally during a so-called probing rebalance, which occur at a frequency specified by the `probing.rebalance.interval.ms` config). This means " +
+        "during a rebalance (normally during a so-called probing rebalance, which occur at a frequency specified by the <code>probing.rebalance.interval.ms</code> config). This means " +
         "that the maximum rate at which active tasks can be migrated from one Kafka Streams Instance to another instance can be determined by " +
-        "(`max.warmup.replicas` / `probing.rebalance.interval.ms`).";
+        "(<code>max.warmup.replicas</code> / <code>probing.rebalance.interval.ms</code>).";

     /** {@code metadata.max.age.ms} */
     @SuppressWarnings("WeakerAccess")
@@ -821,7 +821,7 @@ public class StreamsConfig extends AbstractConfig {
     /** {@code upgrade.from} */
     @SuppressWarnings("WeakerAccess")
     public static final String UPGRADE_FROM_CONFIG = "upgrade.from";
-    private static final String UPGRADE_FROM_DOC = "Allows live upgrading (and downgrading in some cases -- see upgrade guide) in a backward compatible way. Default is `null`. " +
+    private static final String UPGRADE_FROM_DOC = "Allows live upgrading (and downgrading in some cases -- see upgrade guide) in a backward compatible way. Default is <code>null</code>. " +
         "Please refer to the Kafka Streams upgrade guide for instructions on how and when to use this config. " +
         "Note that when upgrading from 3.5 to a newer version it is never required to specify this config, " +
         "while upgrading live directly to 4.0+ from 2.3 or below is no longer supported even with this config. " +
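For context (not part of this commit), a sketch of the warmup-replica throttle described in the StreamsConfig hunk; with these illustrative values at most 4 warmup replicas can be promoted per 10-minute probing rebalance:

    import java.util.Properties;
    import org.apache.kafka.streams.StreamsConfig;

    // Sketch: cap extra broker traffic used for high availability by bounding
    // max.warmup.replicas relative to probing.rebalance.interval.ms.
    public class WarmupThrottleSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "demo-app");        // placeholder
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
            props.put(StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG, 4);
            props.put(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, 600_000);
            System.out.println(props);
        }
    }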