Mirror of https://github.com/apache/kafka.git

MINOR: Fix typos in multiple files (#19102)

Fix typos in multiple files

Reviewers: Andrew Schofield <aschofield@confluent.io>

This commit is contained in:
parent 6ecf6817ad
commit 3d7ac0c3d1
@@ -81,7 +81,7 @@ class FieldPathNotationTest {
@Test
void shouldBuildV2WhenIncludesDotsAndBacktickPair() {
// Given v2 and fields including dots
- // When backticks are wrapping a field name (i.e. withing edges or between dots)
+ // When backticks are wrapping a field name (i.e. within edges or between dots)
// Then build a path with steps separated by dots and not including backticks
assertParseV2("`foo.bar.baz`", "foo.bar.baz");
assertParseV2("foo.`bar.baz`", "foo", "bar.baz");
@@ -92,7 +92,7 @@ class FieldPathNotationTest {
@Test
void shouldBuildV2AndIgnoreBackticksThatAreNotWrapping() {
// Given v2 and fields including dots and backticks
- // When backticks are wrapping a field name (i.e. withing edges or between dots)
+ // When backticks are wrapping a field name (i.e. within edges or between dots)
// Then build a path with steps separated by dots and including non-wrapping backticks
assertParseV2("foo.``bar.baz`", "foo", "`bar.baz");
assertParseV2("foo.`bar.baz``", "foo", "bar.baz`");
@@ -105,7 +105,7 @@ class FieldPathNotationTest {
@Test
void shouldBuildV2AndEscapeBackticks() {
// Given v2 and fields including dots and backticks
- // When backticks are wrapping a field name (i.e. withing edges or between dots)
+ // When backticks are wrapping a field name (i.e. within edges or between dots)
// and wrapping backticks that are part of the field name are escaped with backslashes
// Then build a path with steps separated by dots and including escaped and non-wrapping backticks
assertParseV2("foo.`bar\\`.baz`", "foo", "bar`.baz");
@@ -300,7 +300,7 @@ public class SharePartition {
private long endOffset;

/**
- * The initial read gap offset tracks if there are any gaps in the in-flight batch during intial
+ * The initial read gap offset tracks if there are any gaps in the in-flight batch during initial
* read of the share partition state from the persister.
*/
private InitialReadGapOffset initialReadGapOffset;
@@ -65,7 +65,7 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE
private val describeTokenFailPassword = "describe-token-fail-password"

override def configureSecurityAfterServersStart(): Unit = {
- // Create the Acls before calling super which will create the additiona tokens
+ // Create the Acls before calling super which will create the additional tokens
Using.resource(createPrivilegedAdminClient()) { superuserAdminClient =>
superuserAdminClient.createAcls(List(AclTokenOtherDescribe, AclTokenCreate, AclTokenDescribe).asJava).values
@@ -898,7 +898,7 @@
As of 2.6.0 Kafka Streams deprecates <code>KStream.through()</code> in favor of the new <code>KStream.repartition()</code> operator
(as per <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-221%3A+Enhance+DSL+with+Connecting+Topic+Creation+and+Repartition+Hint">KIP-221</a>).
<code>KStream.repartition()</code> is similar to <code>KStream.through()</code>, however Kafka Streams will manage the topic for you.
- If you need to write into and read back from a topic that you mange, you can fall back to use <code>KStream.to()</code> in combination with <code>StreamsBuilder#stream()</code>.
+ If you need to write into and read back from a topic that you manage, you can fall back to use <code>KStream.to()</code> in combination with <code>StreamsBuilder#stream()</code>.
Please refer to the <a href="/{{version}}/documentation/streams/developer-guide/dsl-api.html">developer guide</a> for more details about <code>KStream.repartition()</code>.
</p>
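For reference, a minimal sketch of the fallback pattern this documentation hunk describes, writing to and reading back from a user-managed topic. The topic name and serdes below are placeholder assumptions, not part of the commit:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;

public class ManualRepartitionExample {
    static KStream<String, Long> writeAndReadBack(StreamsBuilder builder, KStream<String, Long> input) {
        // Write into a topic that the application itself owns and manages ...
        input.to("my-managed-topic", Produced.with(Serdes.String(), Serdes.Long()));
        // ... and read it back, instead of using the deprecated KStream.through()
        return builder.stream("my-managed-topic", Consumed.with(Serdes.String(), Serdes.Long()));
    }
}
```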
@@ -386,7 +386,7 @@
</li>
<li>
KIP-714 is now enabled for Kafka Streams via <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-1076%3A++Metrics+for+client+applications+KIP-714+extension">KIP-1076</a>.
- This allows to not only collect the metric of the internally used clients of a Kafka Streams appliction via a broker-side plugin,
+ This allows to not only collect the metric of the internally used clients of a Kafka Streams application via a broker-side plugin,
but also to collect the <a href="/{{version}}/documentation/#kafka_streams_monitoring">metrics</a> of the Kafka Streams runtime itself.
</li>
<li>
@@ -32,7 +32,7 @@ import java.util.Optional;
*
* @param name The name of the topic.
* @param numberOfPartitions The number of partitions for the topic.
- * @param replicationFactor The replication factor of the topic. If undefiend, the broker default is used.
+ * @param replicationFactor The replication factor of the topic. If undefined, the broker default is used.
* @param topicConfigs The topic configurations of the topic.
*/
public record ConfiguredInternalTopic(String name,
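For illustration only, a hypothetical record along these lines shows how an undefined replication factor can be modelled with Optional so the broker default applies; the type and member names below (other than those in the javadoc above) are assumptions, not the actual Kafka class:

```java
import java.util.Map;
import java.util.Optional;

// A sketch, not the real ConfiguredInternalTopic: an internal-topic description
// where an empty replicationFactor means "use the broker default".
public record InternalTopicSketch(String name,
                                  int numberOfPartitions,
                                  Optional<Short> replicationFactor,
                                  Map<String, String> topicConfigs) {

    public short effectiveReplicationFactor(short brokerDefault) {
        // Fall back to the broker default when the topic does not define one
        return replicationFactor.orElse(brokerDefault);
    }
}
```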
@@ -1772,7 +1772,7 @@ public class ReplicationControlManager {
}

/**
- * Trigger unclean leader election for partitions without leader (visiable for testing)
+ * Trigger unclean leader election for partitions without leader (visible for testing)
*
* @param records The record list to append to.
* @param maxElections The maximum number of elections to perform.
@@ -517,7 +517,7 @@ public final class MetaPropertiesEnsemble {
}
if (metaProps.directoryId().isPresent()) {
if (DirectoryId.reserved(metaProps.directoryId().get())) {
- throw new RuntimeException("Invalid resrved directory ID " +
+ throw new RuntimeException("Invalid reserved directory ID " +
metaProps.directoryId().get() + " found in " + logDir);
}
String prevLogDir = seenUuids.put(metaProps.directoryId().get(), logDir);
@@ -580,7 +580,7 @@ public class PartitionChangeBuilderTest {
.setLeaderRecoveryState(LeaderRecoveryState.RECOVERING.value());

if (version >= 2) {
- // The test partition has ELR, so unclean election will clear these fiedls.
+ // The test partition has ELR, so unclean election will clear these fields.
record.setEligibleLeaderReplicas(Collections.emptyList())
.setLastKnownElr(Collections.emptyList());
}
@@ -250,7 +250,7 @@ public enum Feature {
* - The feature X has default version = XV_10 (dependency = {}), latest production = XV_10 (dependency = {})
* - The feature X has default version = XV_10 (dependency = {Y: YV_3}), latest production = XV_11 (dependency = {Y: YV_4})
* The feature Y has default version = YV_3 (dependency = {}), latest production = YV_4 (dependency = {})
- * - The feature X has default version = XV_10 (dependency = {MetadataVersion: IBP_4_0_IV0}), boostrap MV = IBP_4_0_IV0,
+ * - The feature X has default version = XV_10 (dependency = {MetadataVersion: IBP_4_0_IV0}), bootstrap MV = IBP_4_0_IV0,
* latest production = XV_11 (dependency = {MetadataVersion: IBP_4_0_IV1}), MV latest production = IBP_4_0_IV1
*
* @param feature the feature to validate.
@@ -228,7 +228,7 @@ public class LagFetchIntegrationTest {
assertThat(lagInfo.currentOffsetPosition(), equalTo(0L));
assertThat(lagInfo.endOffsetPosition(), equalTo(5L));
assertThat(lagInfo.offsetLag(), equalTo(5L));
- // standby thread wont proceed to RUNNING before this barrier is crossed
+ // standby thread won't proceed to RUNNING before this barrier is crossed
lagCheckBarrier.await(60, TimeUnit.SECONDS);

// wait till the lag goes down to 0, on the standby
@@ -169,7 +169,7 @@ public class Joined<K, VLeft, VRight> implements NamedOperation<Joined<K, VLeft,

/**
- * Create an instance of {@code Joined} with aother value {@link Serde}.
+ * Create an instance of {@code Joined} with another value {@link Serde}.
* {@code null} values are accepted and will be replaced by the default value serde as defined in config.
*
* @param rightValueSerde
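As context for this javadoc, a small hedged sketch of how explicit serdes are typically supplied to a KStream-KTable join via Joined.with; topic names and value types below are placeholders, not taken from the commit:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Joined;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;

public class JoinedUsageExample {
    static KStream<String, String> enrich(StreamsBuilder builder) {
        KStream<String, Long> orders = builder.stream("orders");        // placeholder topic
        KTable<String, Integer> customers = builder.table("customers"); // placeholder topic
        // Supply key, left-value and right-value serdes explicitly instead of relying on config defaults
        return orders.join(
            customers,
            (orderValue, customerValue) -> orderValue + "/" + customerValue,
            Joined.with(Serdes.String(), Serdes.Long(), Serdes.Integer()));
    }
}
```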
@@ -210,7 +210,7 @@ public interface SessionWindowedCogroupedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters)} KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // some windowed aggregation on value type double
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> localWindowStore = streams.store(storeQueryParams);
* String key = "some-key";
@@ -131,7 +131,7 @@ public interface SessionWindowedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // compute sum
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
* String key = "some-key";
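Since the javadoc snippets in these hunks are elided with "...", here is one hedged end-to-end sketch of the pattern they outline: build a session-windowed count and query its store interactively. Topic, store name, and config values below are placeholders:

```java
import java.time.Duration;
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.SessionWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlySessionStore;

public class SessionStoreQueryExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Count events per key and session; the Materialized name is the queryable store name
        builder.<String, String>stream("input-topic")
               .groupByKey()
               .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(Duration.ofMinutes(5)))
               .count(Materialized.as("session-counts"));

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "session-store-example"); // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");     // placeholder
        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.start();

        // The store becomes queryable once the instance reaches RUNNING
        ReadOnlySessionStore<String, Long> sessionStore = streams.store(
            StoreQueryParameters.fromNameAndType("session-counts", QueryableStoreTypes.sessionStore()));
        try (KeyValueIterator<Windowed<String>, Long> sessions = sessionStore.fetch("some-key")) {
            while (sessions.hasNext()) {
                KeyValue<Windowed<String>, Long> session = sessions.next();
                System.out.println(session.key + " -> " + session.value);
            }
        }
    }
}
```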
@@ -178,7 +178,7 @@ public interface SessionWindowedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // compute sum
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
* String key = "some-key";
@@ -332,7 +332,7 @@ public interface SessionWindowedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // compute sum
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
* String key = "some-key";
@@ -393,7 +393,7 @@ public interface SessionWindowedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters)} KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // compute sum
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
* String key = "some-key";
@@ -552,7 +552,7 @@ public interface SessionWindowedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // some windowed aggregation on value type double
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
* String key = "some-key";
@@ -614,7 +614,7 @@ public interface SessionWindowedKStream<K, V> {
* {@link KafkaStreams#store(StoreQueryParameters) KafkaStreams#store(...)}:
* <pre>{@code
* KafkaStreams streams = ... // some windowed aggregation on value type double
- * Sting queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
+ * String queryableStoreName = ... // the queryableStoreName should be the name of the store as defined by the Materialized instance
* StoreQueryParameters<ReadOnlySessionStore<String, Long>> storeQueryParams = StoreQueryParameters.fromNameAndType(queryableStoreName, QueryableStoreTypes.sessionStore());
* ReadOnlySessionStore<String,Long> sessionStore = streams.store(storeQueryParams);
* String key = "some-key";
@@ -214,7 +214,7 @@ public class Graph<V extends Comparable<V>> {
}

// Add a dummy null node connected to every existing node with residual flow 1 and cost 0
- // Then try to find negative cylce starting using dummy node as source node. Since there's no
+ // Then try to find negative cycle starting using dummy node as source node. Since there's no
// path from original nodes to null node, negative cycles must be within original nodes.
final TreeMap<V, Edge> destMap = new TreeMap<>();
for (final V node : residualGraph.nodes) {
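The comment above describes the standard trick of adding a virtual source so a single Bellman-Ford run covers every node. A generic hedged sketch of that negative-cycle check follows; it is an illustration of the technique, not Kafka's Graph class, and the edge representation is assumed:

```java
import java.util.Arrays;

public final class NegativeCycleCheck {
    /** Edge from -> to with the given cost; nodes are 0..nodeCount-1. */
    record Edge(int from, int to, long cost) { }

    /**
     * Bellman-Ford with a virtual source: start every node at distance 0 (as if a dummy
     * node reached it with cost 0), relax all edges nodeCount times, then check once more.
     */
    static boolean hasNegativeCycle(int nodeCount, Edge[] edges) {
        long[] dist = new long[nodeCount];
        Arrays.fill(dist, 0L); // dummy source connected to every node with cost 0
        for (int pass = 0; pass < nodeCount; pass++) {
            boolean relaxed = false;
            for (Edge e : edges) {
                if (dist[e.from()] + e.cost() < dist[e.to()]) {
                    dist[e.to()] = dist[e.from()] + e.cost();
                    relaxed = true;
                }
            }
            if (!relaxed) {
                return false; // converged early: no negative cycle
            }
        }
        // One extra pass: any further improvement proves a negative cycle exists
        for (Edge e : edges) {
            if (dist[e.from()] + e.cost() < dist[e.to()]) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Edge[] edges = { new Edge(0, 1, 1), new Edge(1, 2, -2), new Edge(2, 0, -1) };
        System.out.println(hasNegativeCycle(3, edges)); // true: cycle 0 -> 1 -> 2 -> 0 has total cost -2
    }
}
```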
@@ -4673,7 +4673,7 @@ public class TaskManagerTest {
verifyNoMoreInteractions(activeTaskCreator);
verifyNoMoreInteractions(standbyTaskCreator);

- // verify the recycled task is now being used as an assiged Active
+ // verify the recycled task is now being used as an assigned Active
assertEquals(Collections.singletonMap(taskId00, activeTask), taskManager.activeTaskMap());
assertEquals(Collections.emptyMap(), taskManager.standbyTaskMap());
}
@@ -4734,7 +4734,7 @@ public class TaskManagerTest {
verifyNoMoreInteractions(activeTaskCreator);
verifyNoMoreInteractions(standbyTaskCreator);

- // verify the recycled task is now being used as an assiged Active
+ // verify the recycled task is now being used as an assigned Active
assertEquals(Collections.singletonMap(taskId00, activeTask), taskManager.activeTaskMap());
assertEquals(Collections.emptyMap(), taskManager.standbyTaskMap());
}