mirror of https://github.com/apache/kafka.git
MINOR: fix linking errors in javadoc (#8198)
This improvement fixes several broken links to classes and methods within the javadocs. Related to #8291

Reviewers: Konstantine Karantasis <konstantine@confluent.io>
parent 5ccd3cd46d
commit 4f6907947a
@@ -248,7 +248,7 @@ public class Metadata implements Closeable {
  * is set for topics if required and expired topics are removed from the metadata.
  *
  * @param requestVersion The request version corresponding to the update response, as provided by
- *                       {@link #newMetadataRequestAndVersion()}.
+ *                       {@link #newMetadataRequestAndVersion(long)}.
  * @param response metadata response received from the broker
  * @param isPartialUpdate whether the metadata request was for a subset of the active topics
  * @param nowMs current time in milliseconds
@@ -18,8 +18,10 @@ package org.apache.kafka.clients.admin;
  import org.apache.kafka.common.annotation.InterfaceStability;

  import java.util.Map;

  /**
- * Options for the {@link AdminClient#alterConsumerGroupOffsets(String, Map)} call.
+ * Options for the {@link AdminClient#alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)} call.
  *
  * The API of this class is evolving, see {@link AdminClient} for details.
  */
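For context, the corrected link above points at AdminClient#alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions). Below is a minimal sketch of a call to that method; the bootstrap address, topic, group id and offset value are illustrative assumptions, not part of this commit.

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConsumerGroupOffsetsOptions;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class AlterGroupOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            // Move the committed offset of my-topic-0 for group my-group to 42.
            Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(
                    new TopicPartition("my-topic", 0), new OffsetAndMetadata(42L));

            AlterConsumerGroupOffsetsOptions options =
                    new AlterConsumerGroupOffsetsOptions().timeoutMs(30_000);

            // Block until the brokers acknowledge the new committed offsets.
            admin.alterConsumerGroupOffsets("my-group", offsets, options).all().get();
        }
    }
}

Note that the brokers generally accept the new offsets only while the consumer group has no active members.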
@@ -17,8 +17,11 @@
  package org.apache.kafka.clients.admin;

  import org.apache.kafka.common.ElectionType;
  import org.apache.kafka.common.annotation.InterfaceStability;

  import java.util.Set;

  /**
  * Options for {@link Admin#electLeaders(ElectionType, Set, ElectLeadersOptions)}.
  *
@@ -20,6 +20,9 @@ package org.apache.kafka.clients.admin;
  import java.util.Map;
  import java.util.Optional;
  import java.util.Set;

  import org.apache.kafka.common.ElectionType;
  import org.apache.kafka.common.KafkaFuture;
  import org.apache.kafka.common.TopicPartition;
  import org.apache.kafka.common.annotation.InterfaceStability;
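The two hunks above reference Admin#electLeaders(ElectionType, Set, ElectLeadersOptions) and its result type. A hedged sketch of how that API is typically invoked follows; the broker address and partition are assumptions.

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ElectLeadersOptions;
import org.apache.kafka.clients.admin.ElectLeadersResult;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Properties;
import java.util.Set;

public class ElectLeadersExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            // Trigger a preferred-leader election for a single partition;
            // passing null instead runs the election for all partitions.
            Set<TopicPartition> partitions =
                    Collections.singleton(new TopicPartition("my-topic", 0));

            ElectLeadersResult result = admin.electLeaders(
                    ElectionType.PREFERRED, partitions, new ElectLeadersOptions());

            // partitions() completes with a map from partition to an optional error.
            result.partitions().get().forEach((tp, error) ->
                    System.out.println(tp + " -> " + (error.isPresent() ? error.get() : "ok")));
        }
    }
}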
@@ -17,12 +17,15 @@
  package org.apache.kafka.clients.admin;

  import org.apache.kafka.common.ElectionType;
  import org.apache.kafka.common.annotation.InterfaceStability;

  import java.util.Collection;
  import java.util.Set;

  /**
  * Options for {@link Admin#electPreferredLeaders(Collection, ElectPreferredLeadersOptions)}.
  *
  * <p>
  * The API of this class is evolving, see {@link Admin} for details.
  *
  * @deprecated Since 2.4.0. Use {@link Admin#electLeaders(ElectionType, Set, ElectLeadersOptions)}.
@@ -22,6 +22,8 @@ import java.util.Collection;
  import java.util.Map;
  import java.util.Optional;
  import java.util.Set;

  import org.apache.kafka.common.ElectionType;
  import org.apache.kafka.common.KafkaFuture;
  import org.apache.kafka.common.TopicPartition;
  import org.apache.kafka.common.annotation.InterfaceStability;
@@ -19,6 +19,8 @@ package org.apache.kafka.clients.admin;
  import org.apache.kafka.common.IsolationLevel;
  import org.apache.kafka.common.annotation.InterfaceStability;

  import java.util.Map;

  /**
  * Options for {@link AdminClient#listOffsets(Map)}.
  *
@@ -16,6 +16,7 @@
  */
  package org.apache.kafka.clients.admin;

  import java.util.Map;

  /**
  * This class allows to specify the desired offsets when using {@link KafkaAdminClient#listOffsets(Map, ListOffsetsOptions)}
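The two hunks above reference AdminClient#listOffsets(Map, ListOffsetsOptions) together with OffsetSpec. A sketch of a typical call is shown below, assuming a Kafka version that ships this API; the broker address and topic are illustrative.

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsOptions;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class ListOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address

        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0);

            // Ask for the latest offset of the partition; OffsetSpec.earliest() and
            // OffsetSpec.forTimestamp(ts) are the other commonly used specs.
            Map<TopicPartition, OffsetSpec> request =
                    Collections.singletonMap(tp, OffsetSpec.latest());

            ListOffsetsResult result = admin.listOffsets(
                    request, new ListOffsetsOptions(IsolationLevel.READ_COMMITTED));

            ListOffsetsResult.ListOffsetsResultInfo info = result.partitionResult(tp).get();
            System.out.println("offset=" + info.offset() + " timestamp=" + info.timestamp());
        }
    }
}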
@@ -18,13 +18,15 @@ package org.apache.kafka.clients.consumer.internals;
  import org.apache.kafka.common.errors.RetriableException;
  import org.apache.kafka.common.protocol.Errors;
  import org.apache.kafka.common.utils.Timer;

  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.ConcurrentLinkedQueue;
  import java.util.concurrent.atomic.AtomicReference;

  /**
- * Result of an asynchronous request from {@link ConsumerNetworkClient}. Use {@link ConsumerNetworkClient#poll(long)}
+ * Result of an asynchronous request from {@link ConsumerNetworkClient}. Use {@link ConsumerNetworkClient#poll(Timer)}
  * (and variants) to finish a request future. Use {@link #isDone()} to check if the future is complete, and
  * {@link #succeeded()} to check if the request completed successfully. Typical usage might look like this:
  *
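RequestFuture and ConsumerNetworkClient are internal consumer classes, so the snippet below is only a sketch of the usage pattern the (truncated) javadoc describes, assuming the internal signatures around the time of this commit.

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
import org.apache.kafka.clients.consumer.internals.RequestFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.utils.Timer;

public class RequestFutureUsageSketch {

    // Send a request and block on the future, mirroring the pattern the javadoc describes.
    static ClientResponse sendAndWait(ConsumerNetworkClient client,
                                      Node node,
                                      AbstractRequest.Builder<?> requestBuilder,
                                      Timer timer) {
        RequestFuture<ClientResponse> future = client.send(node, requestBuilder);
        client.poll(future, timer); // drive I/O until the future completes or the timer expires

        if (future.succeeded()) {
            return future.value();   // the raw response; callers usually parse it further
        }
        if (future.failed()) {
            throw future.exception();
        }
        throw new TimeoutException("request did not complete within the timer");
    }
}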
@@ -26,16 +26,16 @@ package org.apache.kafka.common;
  * There will be one invocation of {@link ClusterResourceListener#onUpdate(ClusterResource)} after each metadata response.
  * Note that the cluster id may be null when the Kafka broker version is below 0.10.1.0. If you receive a null cluster id, you can expect it to always be null unless you have a cluster with multiple broker versions which can happen if the cluster is being upgraded while the client is running.
  * <p>
- * {@link org.apache.kafka.clients.producer.ProducerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after {@link org.apache.kafka.clients.producer.ProducerInterceptor#onSend(ProducerRecord)}
- * but before {@link org.apache.kafka.clients.producer.ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} .
+ * {@link org.apache.kafka.clients.producer.ProducerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after {@link org.apache.kafka.clients.producer.ProducerInterceptor#onSend(org.apache.kafka.clients.producer.ProducerRecord)}
+ * but before {@link org.apache.kafka.clients.producer.ProducerInterceptor#onAcknowledgement(org.apache.kafka.clients.producer.RecordMetadata, Exception)} .
  * <p>
- * {@link org.apache.kafka.clients.consumer.ConsumerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.clients.consumer.ConsumerInterceptor#onConsume(ConsumerRecords)}
+ * {@link org.apache.kafka.clients.consumer.ConsumerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.clients.consumer.ConsumerInterceptor#onConsume(org.apache.kafka.clients.consumer.ConsumerRecords)}
  * <p>
  * {@link org.apache.kafka.common.serialization.Serializer} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.common.serialization.Serializer#serialize(String, Object)}
  * <p>
  * {@link org.apache.kafka.common.serialization.Deserializer} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked before {@link org.apache.kafka.common.serialization.Deserializer#deserialize(String, byte[])}
  * <p>
- * {@link org.apache.kafka.common.metrics.MetricsReporter} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after first {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)} invocation for Producer metrics reporter
+ * {@link org.apache.kafka.common.metrics.MetricsReporter} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after first {@link org.apache.kafka.clients.producer.KafkaProducer#send(org.apache.kafka.clients.producer.ProducerRecord)} invocation for Producer metrics reporter
  * and after first {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)} invocation for Consumer metrics
  * reporters. The reporter may receive metric events from the network layer before this method is invoked.
  * <h4>Broker</h4>
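To make the ordering guarantees above concrete, here is a hedged sketch of a ProducerInterceptor that also implements ClusterResourceListener; the class name and the logging are illustrative.

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.ClusterResource;
import org.apache.kafka.common.ClusterResourceListener;

import java.util.Map;

/**
 * A producer interceptor that also listens for cluster metadata updates.
 * Per the javadoc above, onUpdate() fires after onSend() but before onAcknowledgement().
 */
public class ClusterIdLoggingInterceptor<K, V>
        implements ProducerInterceptor<K, V>, ClusterResourceListener {

    private volatile String clusterId = "unknown"; // may stay null/unknown on very old brokers

    @Override
    public void onUpdate(ClusterResource clusterResource) {
        clusterId = clusterResource.clusterId();
        System.out.println("Connected to cluster " + clusterId);
    }

    @Override
    public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) {
        return record; // pass the record through unchanged
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            System.err.println("Send failed on cluster " + clusterId + ": " + exception);
        }
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}

Such an interceptor would be registered through the producer's interceptor.classes configuration.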
@@ -17,11 +17,13 @@
  package org.apache.kafka.common;

- import java.util.Arrays;
  import org.apache.kafka.common.annotation.InterfaceStability;

+ import java.util.Arrays;
+ import java.util.Set;

  /**
- * Options for {@link org.apache.kafka.clients.admin.Admin#electLeaders(ElectionType, Set, ElectLeadersOptions)}.
+ * Options for {@link org.apache.kafka.clients.admin.Admin#electLeaders(ElectionType, Set, org.apache.kafka.clients.admin.ElectLeadersOptions)}.
  *
  * The API of this class is evolving, see {@link org.apache.kafka.clients.admin.Admin} for details.
  */
@@ -123,7 +123,7 @@ public abstract class SourceTask implements Task {
  * in their own system.
  * </p>
  * <p>
- * The default implementation just calls @{link commitRecord(SourceRecord)}, which is a nop by default. It is
+ * The default implementation just calls {@link #commitRecord(SourceRecord)}, which is a nop by default. It is
  * not necessary to implement both methods.
  * </p>
  *
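The fixed line documents the two-argument commitRecord overload, whose default implementation delegates to commitRecord(SourceRecord). Below is a minimal sketch of a SourceTask that overrides only the single-argument variant; the class name and version string are illustrative.

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

import java.util.Collections;
import java.util.List;
import java.util.Map;

public class AckingSourceTask extends SourceTask {

    @Override
    public String version() {
        return "0.0.1"; // illustrative version string
    }

    @Override
    public void start(Map<String, String> props) {
        // Open connections to the upstream system here.
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // Return newly read records; an empty list simply means "nothing right now".
        return Collections.emptyList();
    }

    // Override only the single-argument variant: per the javadoc above, the default
    // two-argument commitRecord delegates here, so implementing both is unnecessary.
    @Override
    public void commitRecord(SourceRecord record) throws InterruptedException {
        // Acknowledge the record in the upstream system, e.g. commit a queue message.
    }

    @Override
    public void stop() {
        // Release any resources acquired in start().
    }
}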
@@ -747,7 +747,7 @@ public class IntegrationTestUtils {
  * {@link State#RUNNING} state at the same time. Note that states may change between the time
  * that this method returns and the calling function executes its next statement.<p>
  *
- * When the application is already started use {@link #waitForApplicationState(List, Duration)}
+ * When the application is already started use {@link #waitForApplicationState(List, State, Duration)}
  * to wait for instances to reach {@link State#RUNNING} state.
  *
  * @param streamsList the list of streams instances to run.
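waitForApplicationState is part of the Kafka Streams integration-test utilities. A sketch of a test helper calling it, assuming the (List, State, Duration) signature shown in the fixed javadoc; the instance names and timeout are assumptions.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.integration.utils.IntegrationTestUtils;

import java.time.Duration;
import java.util.Arrays;
import java.util.List;

public class WaitForRunningSketch {

    // Start a group of KafkaStreams instances and wait until all of them report RUNNING.
    static void startAndAwaitRunning(KafkaStreams instanceA, KafkaStreams instanceB) throws Exception {
        List<KafkaStreams> streamsList = Arrays.asList(instanceA, instanceB);
        streamsList.forEach(KafkaStreams::start);

        // Blocks until every instance is in RUNNING, or fails if the timeout elapses.
        IntegrationTestUtils.waitForApplicationState(
                streamsList, KafkaStreams.State.RUNNING, Duration.ofSeconds(60));
    }
}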