From 83c39969745dc7076e3756439f6842e7431a8c55 Mon Sep 17 00:00:00 2001
From: John Eismeier
Date: Sat, 20 Oct 2018 19:40:53 -0700
Subject: [PATCH] MINOR: Fix some typos

Just a doc change

Author: John Eismeier

Reviewers: Ewen Cheslack-Postava

Closes #4573 from jeis2497052/trunk
---
 .../kafka/common/record/FileRecordsTest.java  |  2 +-
 .../main/scala/kafka/utils/Mx4jLoader.scala   |  4 ++--
 .../scala/unit/kafka/admin/AdminTest.scala    |  2 +-
 .../unit/kafka/zk/AdminZkClientTest.scala     |  2 +-
 docs/design.html                              |  2 +-
 docs/security.html                            | 12 +++++------
 release.py                                    | 20 +++++++++----------
 .../apache/kafka/streams/TopologyTest.java    |  2 +-
 .../InternalTopologyBuilderTest.java          |  4 ++--
 9 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java
index 4b2b3618a4a..637da9386e5 100644
--- a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java
@@ -220,7 +220,7 @@ public class FileRecordsTest {
         position += message2Size + batches.get(2).sizeInBytes();

         int message4Size = batches.get(3).sizeInBytes();
-        assertEquals("Should be able to find fourth message from a non-existant offset",
+        assertEquals("Should be able to find fourth message from a non-existent offset",
                 new FileRecords.LogOffsetPosition(50L, position, message4Size),
                 fileRecords.searchForOffsetWithSize(3, position));
         assertEquals("Should be able to find fourth message by correct offset",
diff --git a/core/src/main/scala/kafka/utils/Mx4jLoader.scala b/core/src/main/scala/kafka/utils/Mx4jLoader.scala
index d9d1cb46a76..f2c8644b9b3 100644
--- a/core/src/main/scala/kafka/utils/Mx4jLoader.scala
+++ b/core/src/main/scala/kafka/utils/Mx4jLoader.scala
@@ -57,11 +57,11 @@ object Mx4jLoader extends Logging {
       httpAdaptorClass.getMethod("setProcessor", Class.forName("mx4j.tools.adaptor.http.ProcessorMBean")).invoke(httpAdaptor, xsltProcessor.asInstanceOf[AnyRef])
       mbs.registerMBean(xsltProcessor, processorName)
       httpAdaptorClass.getMethod("start").invoke(httpAdaptor)
-      info("mx4j successfuly loaded")
+      info("mx4j successfully loaded")
       return true
     }
     catch {
-      case _: ClassNotFoundException => 
+      case _: ClassNotFoundException =>
         info("Will not load MX4J, mx4j-tools.jar is not in the classpath")
       case e: Throwable =>
         warn("Could not start register mbean in JMX", e)
diff --git a/core/src/test/scala/unit/kafka/admin/AdminTest.scala b/core/src/test/scala/unit/kafka/admin/AdminTest.scala
index a1c317e4e2c..88aff62c5c3 100755
--- a/core/src/test/scala/unit/kafka/admin/AdminTest.scala
+++ b/core/src/test/scala/unit/kafka/admin/AdminTest.scala
@@ -169,7 +169,7 @@ class AdminTest extends ZooKeeperTestHarness with Logging with RackAwareTest {
     zkUtils.updatePersistentPath(ConfigEntityZNode.path(ConfigType.Client, clientId), Json.encodeAsString(map.asJava))

     val configInZk: Map[String, Properties] = AdminUtils.fetchAllEntityConfigs(zkUtils, ConfigType.Client)
-    assertEquals("Must have 1 overriden client config", 1, configInZk.size)
+    assertEquals("Must have 1 overridden client config", 1, configInZk.size)
     assertEquals(props, configInZk(clientId))

     // Test that the existing clientId overrides are read
diff --git a/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala b/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala
index 81d938bd37f..9f81c18c7b1 100644
--- a/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala
+++ b/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala
@@ -307,7 +307,7 @@ class AdminZkClientTest extends ZooKeeperTestHarness with Logging with RackAwareTest {
     zkClient.setOrCreateEntityConfigs(ConfigType.Client, clientId, props)

     val configInZk: Map[String, Properties] = adminZkClient.fetchAllEntityConfigs(ConfigType.Client)
-    assertEquals("Must have 1 overriden client config", 1, configInZk.size)
+    assertEquals("Must have 1 overridden client config", 1, configInZk.size)
     assertEquals(props, configInZk(clientId))

     // Test that the existing clientId overrides are read
diff --git a/docs/design.html b/docs/design.html
index 0061a53c49d..88e737ab72b 100644
--- a/docs/design.html
+++ b/docs/design.html
@@ -277,7 +277,7 @@
     offsets are both updated or neither is. We follow similar patterns for many other data systems which require these stronger
     semantics and for which the messages do not have a primary key to allow for deduplication.

    So effectively Kafka supports exactly-once delivery in Kafka Streams, and the transactional producer/consumer can be used generally to provide
-    exactly-once delivery when transfering and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the
+    exactly-once delivery when transferring and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the
    offset which makes implementing this feasible (see also Kafka Connect). Otherwise, Kafka guarantees at-least-once delivery by default, and allows the user
    to implement at-most-once delivery by disabling retries on the producer and committing offsets in the consumer prior to processing a batch of messages.
diff --git a/docs/security.html b/docs/security.html
index b0183343a4b..6ff9eba2cbf 100644
--- a/docs/security.html
+++ b/docs/security.html
@@ -220,7 +220,7 @@
        ssl.keystore.location=/var/private/ssl/client.keystore.jks
        ssl.keystore.password=test1234
        ssl.key.password=test1234
-        
+
    Other configuration settings that may also be needed depending on our requirements and the broker configuration:

  1. ssl.provider (Optional). The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.
  2.
@@ -342,7 +342,7 @@
  3. Pass the JAAS config file location as JVM parameter to each client JVM. For example:
        -Djava.security.auth.login.config=/etc/kafka/kafka_client_jaas.conf
  4.
-        
+
@@ -455,7 +455,7 @@
    Clients (producers, consumers, connect workers, etc) will authenticate to the cluster with their own principal
    (usually with the same name as the user running the client), so obtain or create these principals as needed.
    Then configure the JAAS configuration property for each client.
-   Different clients within a JVM may run as different users by specifiying different principals.
+   Different clients within a JVM may run as different users by specifying different principals.
    The property sasl.jaas.config in producer.properties or consumer.properties describes
    how clients like producer and consumer can connect to the Kafka Broker.
    The following is an example configuration for a client using a keytab (recommended for long-running processes):
@@ -621,9 +621,9 @@
  • Configuring Kafka Clients
    To configure SASL authentication on the clients:
-    Configure the JAAS configuration property for each client in producer.properties or consumer.properties. 
+    Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
        The login module describes how the clients like producer and consumer can connect to the Kafka Broker.
-       The following is an example configuration for a client for the SCRAM mechanisms: 
+       The following is an example configuration for a client for the SCRAM mechanisms:
          sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
               username="alice" \
      @@ -973,7 +973,7 @@
                       
      Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
         The login module describes how the clients like producer and consumer can connect to the Kafka Broker.
-        The following is an example configuration for a client for the token authentication: 
+        The following is an example configuration for a client for the token authentication:
            sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
                 username="tokenID123" \
        diff --git a/release.py b/release.py
        index 3573a7f8433..1cf54c4598e 100755
        --- a/release.py
        +++ b/release.py
        @@ -285,13 +285,13 @@ if not user_ok("""Requirements:
               signing.keyId=your-gpgkeyId
               signing.password=your-gpg-passphrase
               signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist anymore, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
        -8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e., 
+8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
        <server>
           <id>apache.releases.https</id>
           <username>your-apache-id</username>
           <password>your-apache-passwd</password>
         </server>
-	<server>
+        <server>
             <id>your-gpgkeyId</id>
             <passphrase>your-gpg-passphase</passphrase>
         </server>
        @@ -299,18 +299,18 @@ if not user_ok("""Requirements:
        <profile>
            <id>gpg-signing</id>
            <properties>
                <gpg.keyname>your-gpgkeyId</gpg.keyname>
-        	<gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
+                <gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
            </properties>
        </profile>
         9. You may also need to update some gnupgp configs:
        -	~/.gnupg/gpg-agent.conf
        -	allow-loopback-pinentry
        +        ~/.gnupg/gpg-agent.conf
        +        allow-loopback-pinentry
         
        -	~/.gnupg/gpg.conf
        -	use-agent
        -	pinentry-mode loopback
        +        ~/.gnupg/gpg.conf
        +        use-agent
        +        pinentry-mode loopback
         
        -	echo RELOADAGENT | gpg-connect-agent
        +        echo RELOADAGENT | gpg-connect-agent
         
         If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
         
        @@ -404,7 +404,7 @@ cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
         cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
         cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
         # Command in explicit list due to messages with spaces
        -cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
        +cmd("Committing version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
         # Command in explicit list due to messages with spaces
         cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
         rc_githash = cmd_output("git show-ref --hash " + rc_tag)
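
[Editor's aside: the release.py hunk above keeps the git commit invocation as an explicit argument list because the commit message contains spaces, so no shell quoting is needed. A minimal sketch of the same argv-as-list idea in Java; the version string is a hypothetical placeholder.]

import java.io.IOException;
import java.util.Arrays;

public class ArgvListSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        String releaseVersion = "2.0.0"; // hypothetical version
        // Each argv element is passed to the OS verbatim, so the message
        // "Bump version to ..." survives its embedded spaces unquoted.
        Process p = new ProcessBuilder(
                Arrays.asList("git", "commit", "-a", "-m", "Bump version to " + releaseVersion))
            .inheritIO()
            .start();
        System.exit(p.waitFor());
    }
}
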
        diff --git a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
        index 289cdf0d6dc..de856bba236 100644
        --- a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
        +++ b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
        @@ -258,7 +258,7 @@ public class TopologyTest {
             public void shouldNotAllowToAddStateStoreToNonExistingProcessor() {
                 mockStoreBuilder();
                 EasyMock.replay(storeBuilder);
        -        topology.addStateStore(storeBuilder, "no-such-processsor");
        +        topology.addStateStore(storeBuilder, "no-such-processor");
             }
         
             @Test
        diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java
        index 8ddb0b50040..d49dd9d0878 100644
        --- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java
        +++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java
        @@ -103,7 +103,7 @@ public class InternalTopologyBuilderTest {
             @Test
             public void shouldAddPatternSourceWithoutOffsetReset() {
                 final Pattern expectedPattern = Pattern.compile("test-.*");
        -        
        +
                 builder.addSource(null, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), Pattern.compile("test-.*"));
         
                 assertEquals(expectedPattern.pattern(), builder.sourceTopicPattern().pattern());
        @@ -286,7 +286,7 @@ public class InternalTopologyBuilderTest {
         
             @Test(expected = TopologyException.class)
             public void testAddStateStoreWithNonExistingProcessor() {
        -        builder.addStateStore(storeBuilder, "no-such-processsor");
        +        builder.addStateStore(storeBuilder, "no-such-processor");
             }
         
             @Test
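
[Editor's aside: the docs/design.html hunk earlier in this patch describes at-most-once delivery as committing offsets in the consumer prior to processing a batch. A minimal sketch of that pattern with the Java consumer, assuming a local broker and an example topic, both placeholders.]

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class AtMostOnceSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "at-most-once-group");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // commit manually, up front
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                // Committing before processing gives at-most-once: a crash
                // after this point skips records instead of reprocessing them.
                consumer.commitSync();
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value()); // stand-in for real processing
                }
            }
        }
    }
}
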