mirror of https://github.com/apache/kafka.git

MINOR: Fix some typos

Just a doc change.

Author: John Eismeier <john.eismeier@gmail.com>
Reviewers: Ewen Cheslack-Postava <ewen@confluent.io>

Closes #4573 from jeis2497052/trunk

parent 74f686d3c2
commit 83c3996974
@@ -220,7 +220,7 @@ public class FileRecordsTest {
         position += message2Size + batches.get(2).sizeInBytes();

         int message4Size = batches.get(3).sizeInBytes();
-        assertEquals("Should be able to find fourth message from a non-existant offset",
+        assertEquals("Should be able to find fourth message from a non-existent offset",
                 new FileRecords.LogOffsetPosition(50L, position, message4Size),
                 fileRecords.searchForOffsetWithSize(3, position));
         assertEquals("Should be able to find fourth message by correct offset",
@@ -57,11 +57,11 @@ object Mx4jLoader extends Logging {
       httpAdaptorClass.getMethod("setProcessor", Class.forName("mx4j.tools.adaptor.http.ProcessorMBean")).invoke(httpAdaptor, xsltProcessor.asInstanceOf[AnyRef])
       mbs.registerMBean(xsltProcessor, processorName)
       httpAdaptorClass.getMethod("start").invoke(httpAdaptor)
-      info("mx4j successfuly loaded")
+      info("mx4j successfully loaded")
       return true
     }
     catch {
       case _: ClassNotFoundException =>
         info("Will not load MX4J, mx4j-tools.jar is not in the classpath")
       case e: Throwable =>
         warn("Could not start register mbean in JMX", e)
@@ -169,7 +169,7 @@ class AdminTest extends ZooKeeperTestHarness with Logging with RackAwareTest {
     zkUtils.updatePersistentPath(ConfigEntityZNode.path(ConfigType.Client, clientId), Json.encodeAsString(map.asJava))

     val configInZk: Map[String, Properties] = AdminUtils.fetchAllEntityConfigs(zkUtils, ConfigType.Client)
-    assertEquals("Must have 1 overriden client config", 1, configInZk.size)
+    assertEquals("Must have 1 overridden client config", 1, configInZk.size)
     assertEquals(props, configInZk(clientId))

     // Test that the existing clientId overrides are read
@@ -307,7 +307,7 @@ class AdminZkClientTest extends ZooKeeperTestHarness with Logging with RackAware
     zkClient.setOrCreateEntityConfigs(ConfigType.Client, clientId, props)

     val configInZk: Map[String, Properties] = adminZkClient.fetchAllEntityConfigs(ConfigType.Client)
-    assertEquals("Must have 1 overriden client config", 1, configInZk.size)
+    assertEquals("Must have 1 overridden client config", 1, configInZk.size)
     assertEquals(props, configInZk(clientId))

     // Test that the existing clientId overrides are read
@@ -277,7 +277,7 @@
     offsets are both updated or neither is. We follow similar patterns for many other data systems which require these stronger semantics and for which the messages do not have a primary key to allow for deduplication.
     <p>
     So effectively Kafka supports exactly-once delivery in <a href="https://kafka.apache.org/documentation/streams">Kafka Streams</a>, and the transactional producer/consumer can be used generally to provide
-    exactly-once delivery when transfering and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the
+    exactly-once delivery when transferring and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the
     offset which makes implementing this feasible (see also <a href="https://kafka.apache.org/documentation/#connect">Kafka Connect</a>). Otherwise, Kafka guarantees at-least-once delivery by default, and allows
     the user to implement at-most-once delivery by disabling retries on the producer and committing offsets in the consumer prior to processing a batch of messages.
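For reference, a minimal sketch of the at-most-once recipe that the doc text above describes on the consumer side: offsets are committed before the batch is processed, so a crash mid-batch drops those messages rather than redelivering them. The broker address, group id, and topic name are placeholders, and it assumes a kafka-clients version with poll(Duration) (2.0+).

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class AtMostOnceSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");    // placeholder broker
            props.put("group.id", "at-most-once-demo");          // placeholder group
            props.put("enable.auto.commit", "false");            // we commit manually, up front
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                    // Commit before processing: a crash below loses these messages
                    // but never redelivers them (at-most-once).
                    consumer.commitSync();
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
                    }
                }
            }
        }
    }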
@@ -220,7 +220,7 @@
     ssl.keystore.location=/var/private/ssl/client.keystore.jks
     ssl.keystore.password=test1234
     ssl.key.password=test1234</pre>

     Other configuration settings that may also be needed depending on our requirements and the broker configuration:
     <ol>
         <li>ssl.provider (Optional). The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.</li>
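As a companion to the client properties quoted in that hunk, a sketch of a producer wired for SSL: the keystore lines mirror the quoted snippet, while the truststore settings, broker address, and topic are assumptions added so the example is self-contained.

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class SslClientSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9093");      // placeholder SSL listener
            props.put("security.protocol", "SSL");
            // Truststore settings are assumed; the keystore lines mirror the quoted snippet.
            props.put("ssl.truststore.location", "/var/private/ssl/client.truststore.jks");
            props.put("ssl.truststore.password", "test1234");
            props.put("ssl.keystore.location", "/var/private/ssl/client.keystore.jks");
            props.put("ssl.keystore.password", "test1234");
            props.put("ssl.key.password", "test1234");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("demo-topic", "hello over TLS")); // placeholder topic
            }
        }
    }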
@@ -342,7 +342,7 @@
                 </li>
                 <li>Pass the JAAS config file location as JVM parameter to each client JVM. For example:
                     <pre class="brush: bash;"> -Djava.security.auth.login.config=/etc/kafka/kafka_client_jaas.conf</pre></li>
             </ol>
         </li>
     </ol>
 </li>
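Alongside the -D flag shown in that hunk, the same static JAAS location can also be set from code, provided it happens before the first Kafka client triggers a JAAS login; the path below is the same illustrative one from the doc.

    public class JaasLocationSketch {
        public static void main(String[] args) {
            // Equivalent to -Djava.security.auth.login.config=/etc/kafka/kafka_client_jaas.conf,
            // as long as it runs before the first Kafka client is constructed.
            System.setProperty("java.security.auth.login.config", "/etc/kafka/kafka_client_jaas.conf");
            // ... construct producers/consumers after this point ...
        }
    }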
@@ -455,7 +455,7 @@
     Clients (producers, consumers, connect workers, etc) will authenticate to the cluster with their
     own principal (usually with the same name as the user running the client), so obtain or create
     these principals as needed. Then configure the JAAS configuration property for each client.
-    Different clients within a JVM may run as different users by specifiying different principals.
+    Different clients within a JVM may run as different users by specifying different principals.
     The property <code>sasl.jaas.config</code> in producer.properties or consumer.properties describes
     how clients like producer and consumer can connect to the Kafka Broker. The following is an example
     configuration for a client using a keytab (recommended for long-running processes):
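The keytab example itself is truncated in this diff; as a hedged sketch of what such a client might look like with sasl.jaas.config set per-client in code, where the broker address, keytab path, and principal are all placeholders:

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class KerberosClientSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9093");      // placeholder SASL_SSL listener
            props.put("security.protocol", "SASL_SSL");
            props.put("sasl.mechanism", "GSSAPI");
            props.put("sasl.kerberos.service.name", "kafka");
            // Keytab-based login for a long-running process; path and principal are placeholders.
            props.put("sasl.jaas.config",
                    "com.sun.security.auth.module.Krb5LoginModule required "
                    + "useKeyTab=true storeKey=true "
                    + "keyTab=\"/etc/security/keytabs/kafka_client.keytab\" "
                    + "principal=\"kafka-client-1@EXAMPLE.COM\";");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("demo-topic", "authenticated hello")); // placeholder topic
            }
        }
    }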
@@ -621,9 +621,9 @@
     <li><h5><a id="security_sasl_scram_clientconfig" href="#security_sasl_scram_clientconfig">Configuring Kafka Clients</a></h5>
         To configure SASL authentication on the clients:
         <ol>
             <li>Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
                 The login module describes how the clients like producer and consumer can connect to the Kafka Broker.
                 The following is an example configuration for a client for the SCRAM mechanisms:
                 <pre class="brush: text;">
    sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
        username="alice" \
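Completing the picture for SCRAM, a small sketch that sets the quoted ScramLoginModule line programmatically; the password value and the mechanism choice (SCRAM-SHA-256) are illustrative, since the diff truncates the original example.

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;

    public class ScramClientSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9093");      // placeholder listener
            props.put("security.protocol", "SASL_SSL");
            props.put("sasl.mechanism", "SCRAM-SHA-256");        // or SCRAM-SHA-512
            // Same login module as in the quoted snippet; the password is a placeholder.
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.scram.ScramLoginModule required "
                    + "username=\"alice\" password=\"alice-secret\";");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                // send records as usual; authentication happens on connect
            }
        }
    }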
@@ -973,7 +973,7 @@
         <ol>
             <li>Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
                 The login module describes how the clients like producer and consumer can connect to the Kafka Broker.
                 The following is an example configuration for a client for the token authentication:
                 <pre class="brush: text;">
    sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
        username="tokenID123" \
</pre>
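And the token-authentication variant as a sketch: per the delegation-token documentation, the same ScramLoginModule is used with the token ID as the username, the token HMAC as the password, and tokenauth set to true; every credential value below is a placeholder.

    import java.util.Properties;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class TokenAuthClientSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9093");      // placeholder listener
            props.put("security.protocol", "SASL_SSL");
            props.put("sasl.mechanism", "SCRAM-SHA-256");
            // Delegation-token login: token ID as username, token HMAC as password.
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.scram.ScramLoginModule required "
                    + "username=\"tokenID123\" "
                    + "password=\"token-hmac-base64\" "          // placeholder HMAC
                    + "tokenauth=\"true\";");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                // subscribe and poll as usual
            }
        }
    }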
release.py (20 additions, 20 deletions)
@@ -285,13 +285,13 @@ if not user_ok("""Requirements:
        signing.keyId=your-gpgkeyId
        signing.password=your-gpg-passphrase
        signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist anymore, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
 8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
        <server>
            <id>apache.releases.https</id>
            <username>your-apache-id</username>
            <password>your-apache-passwd</password>
        </server>
        <server>
            <id>your-gpgkeyId</id>
            <passphrase>your-gpg-passphase</passphrase>
        </server>
@@ -299,18 +299,18 @@ if not user_ok("""Requirements:
        <id>gpg-signing</id>
        <properties>
            <gpg.keyname>your-gpgkeyId</gpg.keyname>
            <gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
        </properties>
    </profile>
 9. You may also need to update some gnupgp configs:
        ~/.gnupg/gpg-agent.conf
        allow-loopback-pinentry

        ~/.gnupg/gpg.conf
        use-agent
        pinentry-mode loopback

        echo RELOADAGENT | gpg-connect-agent

 If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
@@ -404,7 +404,7 @@ cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
 cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
 cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
 # Command in explicit list due to messages with spaces
-cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
+cmd("Committing version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
 # Command in explicit list due to messages with spaces
 cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
 rc_githash = cmd_output("git show-ref --hash " + rc_tag)
@@ -258,7 +258,7 @@ public class TopologyTest {
     public void shouldNotAllowToAddStateStoreToNonExistingProcessor() {
         mockStoreBuilder();
         EasyMock.replay(storeBuilder);
-        topology.addStateStore(storeBuilder, "no-such-processsor");
+        topology.addStateStore(storeBuilder, "no-such-processor");
     }

     @Test
@@ -103,7 +103,7 @@ public class InternalTopologyBuilderTest {
     @Test
     public void shouldAddPatternSourceWithoutOffsetReset() {
         final Pattern expectedPattern = Pattern.compile("test-.*");

         builder.addSource(null, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), Pattern.compile("test-.*"));

         assertEquals(expectedPattern.pattern(), builder.sourceTopicPattern().pattern());
@@ -286,7 +286,7 @@ public class InternalTopologyBuilderTest {

     @Test(expected = TopologyException.class)
     public void testAddStateStoreWithNonExistingProcessor() {
-        builder.addStateStore(storeBuilder, "no-such-processsor");
+        builder.addStateStore(storeBuilder, "no-such-processor");
     }

     @Test