diff --git a/NOTICE-binary b/NOTICE-binary
index 50dabb33d61..b625e142293 100644
--- a/NOTICE-binary
+++ b/NOTICE-binary
@@ -349,7 +349,7 @@ The project maintains the following source code repositories:
 Angular JS, v1.6.6
 * License MIT (http://www.opensource.org/licenses/mit-license.php)
 * Project: http://angularjs.org
-* Coyright: (c) 2010-2017 Google, Inc.
+* Copyright: (c) 2010-2017 Google, Inc.
 
 aopalliance Version 1
 * License: all the source code provided by AOP Alliance is Public Domain.
diff --git a/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java b/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java
index d4b5f35314b..257c98e2930 100644
--- a/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java
+++ b/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java
@@ -135,7 +135,7 @@ public abstract class KafkaFuture<T> implements Future<T> {
      * The action may be invoked by the thread that calls {@code whenComplete} or it may be invoked by the thread that
      * completes the future.
      *
-     * @param action the action to preform
+     * @param action the action to perform
      * @return the new future
      */
     public abstract KafkaFuture<T> whenComplete(BiConsumer<? super T, ? super Throwable> action);
diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
index c2a3a4951e7..5cdb268e045 100644
--- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java
@@ -304,7 +304,7 @@ public class KafkaConsumerTest {
             KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue();
             consumer.registerMetricForSubscription(existingMetric);
 
-            // This test would fail without the check as the exising metric is registered in the consumer on startup
+            // This test would fail without the check as the existing metric is registered in the consumer on startup
             Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric);
         }
     }
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
index 02525989283..fbb3484a03f 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
@@ -2728,7 +2728,7 @@ public class KafkaProducerTest {
                 props, new StringSerializer(), new StringSerializer())) {
             KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue();
             producer.registerMetricForSubscription(existingMetric);
-            // This test would fail without the check as the exising metric is registered in the producer on startup
+            // This test would fail without the check as the existing metric is registered in the producer on startup
             Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric);
         }
     }
@@ -2747,7 +2747,7 @@ public class KafkaProducerTest {
                 props, new StringSerializer(), new StringSerializer())) {
             KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue();
             producer.unregisterMetricFromSubscription(existingMetric);
-            // This test would fail without the check as the exising metric is registered in the consumer on startup
+            // This test would fail without the check as the existing metric is registered in the consumer on startup
             Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric);
         }
     }
diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java
index 63b33a792cf..65a3256b760 100644
--- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java
+++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java
@@ -1258,7 +1258,7 @@ public final class KafkaConfigBackingStore extends KafkaTopicBasedBackingStore i
             } else {
                 // TRACE level since there may be many of these records in the config topic
                 log.trace(
-                    "Ignoring old logging level {} for namespace {} that was writen to the config topic before this worker completed startup",
+                    "Ignoring old logging level {} for namespace {} that was written to the config topic before this worker completed startup",
                     level,
                     namespace
                 );
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java
index b576cda56a7..39d9fadc82e 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java
@@ -495,7 +495,7 @@ abstract class EmbeddedConnect {
         Response response = requestPost(restartEndpoint, "", Collections.emptyMap());
         try {
             if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) {
-                //only the 202 stauts returns a body
+                //only the 202 status returns a body
                 if (response.getStatus() == Response.Status.ACCEPTED.getStatusCode()) {
                     return mapper.readerFor(ConnectorStateInfo.class)
                             .readValue(responseToString(response));
diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java
index 283a8df49ab..865648da6ab 100644
--- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java
+++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntime.java
@@ -493,7 +493,7 @@ public class CoordinatorRuntime<S extends CoordinatorShard<U>, U> implements Aut
         final MemoryRecordsBuilder builder;
 
         /**
-         * The timer used to enfore the append linger time if
+         * The timer used to enforce the append linger time if
          * it is non-zero.
          */
         final Optional lingerTimeoutTask;
diff --git a/core/src/main/scala/kafka/server/FetchSession.scala b/core/src/main/scala/kafka/server/FetchSession.scala
index 773958cd431..0e8a35c6a37 100644
--- a/core/src/main/scala/kafka/server/FetchSession.scala
+++ b/core/src/main/scala/kafka/server/FetchSession.scala
@@ -224,7 +224,7 @@ class CachedPartition(var topic: String,
   * fields are read or modified. This includes modification of the session partition map.
   *
   * @param id The unique fetch session ID.
-  * @param privileged True if this session is privileged. Sessions crated by followers
+  * @param privileged True if this session is privileged. Sessions created by followers
   * are privileged; session created by consumers are not.
   * @param partitionMap The CachedPartitionMap.
   * @param usesTopicIds True if this session is using topic IDs
diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
index 1ffee5dc7f6..4f550c6751b 100644
--- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
+++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java
@@ -2734,7 +2734,7 @@ public class SharePartitionManagerTest {
         SharePartition sp1 = mock(SharePartition.class);
 
         // Do not make the share partition acquirable hence it shouldn't be removed from the cache,
-        // as it won't be part of replica manger readFromLog request.
+        // as it won't be part of replica manager readFromLog request.
         when(sp1.maybeAcquireFetchLock()).thenReturn(false);
         when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null));