mirror of https://github.com/apache/kafka.git
MINOR: Do not end Javadoc comments with `**/` (#14540)
Reviewers: Bruno Cadonna <bruno@confluent.io>, Bill Bejeck <bill@confluent.io>, Hao Li <hli@confluent.io>, Josep Prat <josep.prat@aiven.io>
This commit is contained in:
parent abee8f711c
commit 9b468fb278
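Both endings are legal Java, since any `*/` terminates a block comment, so the change below is purely stylistic: it normalizes how doc comments are closed across the codebase. A minimal sketch of the convention being enforced (the class and method names here are hypothetical, not taken from the patch):

    public class JavadocStyleExample {        // hypothetical example, not part of the patch

        /**
         * Preferred: close the doc comment with a single asterisk and a slash.
         */
        public void preferredStyle() { }

        /**
         * Discouraged: closing with two asterisks and a slash still compiles,
         * but the extra asterisk becomes part of the comment text.
         **/
        public void discouragedStyle() { }
    }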
@@ -1303,7 +1303,7 @@ public class KafkaAdminClient extends AdminClient {
      *
      * @param now The current time in milliseconds.
      * @param responses The latest responses from KafkaClient.
-     **/
+     */
     private void handleResponses(long now, List<ClientResponse> responses) {
         for (ClientResponse response : responses) {
             int correlationId = response.requestHeader().correlationId();
@@ -2237,7 +2237,7 @@ public class KafkaConsumer<K, V> implements Consumer<K, V> {
      * @return This {@code Consumer} instance's current lag for the given partition.
      *
      * @throws IllegalStateException if the {@code topicPartition} is not assigned
-     **/
+     */
     @Override
     public OptionalLong currentLag(TopicPartition topicPartition) {
         acquireAndEnsureOpen();
@@ -445,7 +445,7 @@ public class CommitRequestManager implements RequestManager {
      * {@code inflightOffsetFetches} to bookkeep all the inflight requests.
      * Note: Sendable requests are determined by their timer as we are expecting backoff on failed attempt. See
      * {@link RequestState}.
-     **/
+     */
     List<NetworkClientDelegate.UnsentRequest> drain(final long currentTimeMs) {
         List<NetworkClientDelegate.UnsentRequest> unsentRequests = new ArrayList<>();

@@ -66,7 +66,7 @@ public class FetchMetricsRegistry {

     public FetchMetricsRegistry(Set<String> tags, String metricGrpPrefix) {

-        /***** Client level *****/
+        /* Client level */
         String groupName = metricGrpPrefix + "-fetch-manager-metrics";

         this.fetchSizeAvg = new MetricNameTemplate("fetch-size-avg", groupName,
@@ -105,7 +105,7 @@ public class FetchMetricsRegistry {
         this.fetchThrottleTimeMax = new MetricNameTemplate("fetch-throttle-time-max", groupName,
                 "The maximum throttle time in ms", tags);

-        /***** Topic level *****/
+        /* Topic level */
         Set<String> topicTags = new LinkedHashSet<>(tags);
         topicTags.add("topic");

@@ -125,7 +125,7 @@ public class FetchMetricsRegistry {
         this.topicRecordsConsumedTotal = new MetricNameTemplate("records-consumed-total", groupName,
                 "The total number of records consumed for a topic", topicTags);

-        /***** Partition level *****/
+        /* Partition level */
         Set<String> partitionTags = new HashSet<>(topicTags);
         partitionTags.add("partition");
         this.partitionRecordsLag = new MetricNameTemplate("records-lag", groupName,
@@ -325,7 +325,7 @@ public class OffsetsRequestManager implements RequestManager, ClusterResourceLis
     /**
      * Build ListOffsets request to send to a specific broker for the partitions and
      * target timestamps. This also adds the request to the list of unsentRequests.
-     **/
+     */
     private CompletableFuture<ListOffsetResult> buildListOffsetRequestToNode(
             Node node,
             Map<TopicPartition, ListOffsetsRequestData.ListOffsetsPartition> targetTimes,
@@ -477,7 +477,7 @@ public class OffsetsRequestManager implements RequestManager, ClusterResourceLis
     /**
      * Build OffsetsForLeaderEpoch request to send to a specific broker for the partitions and
      * positions to fetch. This also adds the request to the list of unsentRequests.
-     **/
+     */
     private CompletableFuture<OffsetsForLeaderEpochUtils.OffsetForEpochResult> buildOffsetsForLeaderEpochRequestToNode(
             final Node node,
             final Map<TopicPartition, SubscriptionState.FetchPosition> fetchPositions,
@@ -76,7 +76,7 @@ public class SenderMetricsRegistry {
         this.tags = this.metrics.config().tags().keySet();
         this.allTemplates = new ArrayList<>();

-        /***** Client level *****/
+        /* Client level */

         this.batchSizeAvg = createMetricName("batch-size-avg",
                 "The average number of bytes sent per partition per-request.");
@@ -125,7 +125,7 @@ public class SenderMetricsRegistry {
         this.produceThrottleTimeMax = createMetricName("produce-throttle-time-max",
                 "The maximum time in ms a request was throttled by a broker");

-        /***** Topic level *****/
+        /* Topic level */
         this.topicTags = new LinkedHashSet<>(tags);
         this.topicTags.add("topic");

@@ -160,7 +160,7 @@ public class SenderMetricsRegistry {
         return createTemplate(name, TOPIC_METRIC_GROUP_NAME, description, this.topicTags);
     }

-    /** topic level metrics **/
+    /* topic level metrics */
     public MetricName topicRecordSendRate(Map<String, String> tags) {
         return this.metrics.metricInstance(this.topicRecordSendRate, tags);
     }
@@ -140,7 +140,7 @@ public enum ApiKeys {
     /** indicates the minimum required inter broker magic required to support the API */
     public final byte minRequiredInterBrokerMagic;

-    /** indicates whether the API is enabled for forwarding **/
+    /** indicates whether the API is enabled for forwarding */
     public final boolean forwardable;

     public final boolean requiresDelayedAllocation;
@@ -27,6 +27,6 @@ public interface ConnectClusterDetails {
      * Get the cluster ID of the Kafka cluster backing this Connect cluster.
      *
      * @return the cluster ID of the Kafka cluster backing this Connect cluster
-     **/
+     */
     String kafkaClusterId();
 }
@@ -65,7 +65,7 @@ public interface ConnectClusterState {
      * Get details about the setup of the Connect cluster.
      * @return a {@link ConnectClusterDetails} object containing information about the cluster
      * @throws java.lang.UnsupportedOperationException if the default implementation has not been overridden
-     **/
+     */
     default ConnectClusterDetails clusterDetails() {
         throw new UnsupportedOperationException();
     }
@@ -50,10 +50,10 @@ public interface Transformation<R extends ConnectRecord<R>> extends Configurable
      */
     R apply(R record);

-    /** Configuration specification for this transformation. **/
+    /** Configuration specification for this transformation. */
     ConfigDef config();

-    /** Signal that this transformation instance will no longer will be used. **/
+    /** Signal that this transformation instance will no longer will be used. */
     @Override
     void close();
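For one-line doc comments like the two in this hunk, the `**/` ending is more than cosmetic: the first of the two closing asterisks sits at the end of the comment text rather than at the start of a line, so Javadoc's leading-asterisk stripping does not remove it, and a stray `*` may leak into the rendered documentation. A sketch with hypothetical members:

    interface StrayAsteriskExample {          // hypothetical example, not part of the patch

        /** Returns the widget count. **/     // comment text ends "... count. *"
        int widgetCount();

        /** Returns the widget count. */      // clean: no stray asterisk in the text
        int fixedWidgetCount();
    }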
@@ -123,7 +123,7 @@ public class ConnectMetricsRegistry {
     }

     public ConnectMetricsRegistry(Set<String> tags) {
-        /***** Connector level *****/
+        /* Connector level */
         Set<String> connectorTags = new LinkedHashSet<>(tags);
         connectorTags.add(CONNECTOR_TAG_NAME);

@@ -137,7 +137,7 @@ public class ConnectMetricsRegistry {
         connectorVersion = createTemplate("connector-version", CONNECTOR_GROUP_NAME,
                 "The version of the connector class, as reported by the connector.", connectorTags);

-        /***** Worker task level *****/
+        /* Worker task level */
         Set<String> workerTaskTags = new LinkedHashSet<>(tags);
         workerTaskTags.add(CONNECTOR_TAG_NAME);
         workerTaskTags.add(TASK_TAG_NAME);
@@ -165,7 +165,7 @@ public class ConnectMetricsRegistry {
                 "The average percentage of this task's offset commit attempts that succeeded.",
                 workerTaskTags);

-        /***** Source worker task level *****/
+        /* Source worker task level */
         Set<String> sourceTaskTags = new LinkedHashSet<>(tags);
         sourceTaskTags.add(CONNECTOR_TAG_NAME);
         sourceTaskTags.add(TASK_TAG_NAME);
@@ -219,7 +219,7 @@ public class ConnectMetricsRegistry {
                 "The average number of records in the transactions the task has committed so far.",
                 sourceTaskTags);

-        /***** Sink worker task level *****/
+        /* Sink worker task level */
         Set<String> sinkTaskTags = new LinkedHashSet<>(tags);
         sinkTaskTags.add(CONNECTOR_TAG_NAME);
         sinkTaskTags.add(TASK_TAG_NAME);
@@ -286,7 +286,7 @@ public class ConnectMetricsRegistry {
                 + "committed/flushed/acknowledged by the sink task.",
                 sinkTaskTags);

-        /***** Worker level *****/
+        /* Worker level */
         Set<String> workerTags = new LinkedHashSet<>(tags);

         connectorCount = createTemplate("connector-count", WORKER_GROUP_NAME, "The number of connectors run in this worker.", workerTags);
@@ -341,7 +341,7 @@ public class ConnectMetricsRegistry {
         connectorStatusMetrics.put(connectorRestartingTaskCount, TaskStatus.State.RESTARTING);
         connectorStatusMetrics = Collections.unmodifiableMap(connectorStatusMetrics);

-        /***** Worker rebalance level *****/
+        /* Worker rebalance level */
         Set<String> rebalanceTags = new LinkedHashSet<>(tags);

         connectProtocol = createTemplate("connect-protocol", WORKER_REBALANCE_GROUP_NAME, "The Connect protocol used by this cluster", rebalanceTags);
@@ -358,7 +358,7 @@ public class ConnectMetricsRegistry {
         rebalanceTimeSinceLast = createTemplate("time-since-last-rebalance-ms", WORKER_REBALANCE_GROUP_NAME,
                 "The time in milliseconds since this worker completed the most recent rebalance.", rebalanceTags);

-        /***** Task Error Handling Metrics *****/
+        /* Task Error Handling Metrics */
         Set<String> taskErrorHandlingTags = new LinkedHashSet<>(tags);
         taskErrorHandlingTags.add(CONNECTOR_TAG_NAME);
         taskErrorHandlingTags.add(TASK_TAG_NAME);
@@ -370,7 +370,7 @@ class WorkerSinkTask extends WorkerTask {
     /**
      * Starts an offset commit by flushing outstanding messages from the task and then starting
      * the write commit.
-     **/
+     */
     private void doCommit(Map<TopicPartition, OffsetAndMetadata> offsets, boolean closing, int seqno) {
         if (isCancelled()) {
             log.debug("Skipping final offset commit as task has been cancelled");
@@ -74,7 +74,7 @@ import java.util.stream.Stream;
  * will generate two invocations of "someTest" (since ClusterType.Both was given). For each invocation, the test class
  * SomeIntegrationTest will be instantiated, lifecycle methods (before/after) will be run, and "someTest" will be invoked.
  *
- **/
+ */
 public class ClusterTestExtensions implements TestTemplateInvocationContextProvider {
     @Override
     public boolean supportsTestTemplate(ExtensionContext context) {
@@ -293,7 +293,7 @@ public class ByteUtilsBenchmark {
     }


-    /******************* Implementations **********************/
+    /* Implementations */

     /*
      * Implementation in Trunk as of Apr 2023 / v3.4
@@ -639,7 +639,7 @@ public class StateDirectoryTest {
         }
     }

-    /************* Named Topology Tests *************/
+    /* Named Topology Tests */

     @Test
     public void shouldCreateTaskDirectoriesUnderNamedTopologyDirs() throws IOException {
@@ -767,8 +767,6 @@ public class StateDirectoryTest {
         }
     }

-    /************************************************/
-
     @Test
     public void shouldPersistProcessIdAcrossRestart() {
         final UUID processId = directory.initializeProcessId();
@@ -73,7 +73,7 @@ public class StreamsAssignmentScaleTest {

     private final Logger log = LoggerFactory.getLogger(StreamsAssignmentScaleTest.class);

-    /************ HighAvailabilityTaskAssignor tests ************/
+    /* HighAvailabilityTaskAssignor tests */

     @Test(timeout = 120 * 1000)
     public void testHighAvailabilityTaskAssignorLargePartitionCount() {
@@ -95,7 +95,7 @@ public class StreamsAssignmentScaleTest {
         completeLargeAssignment(1_000, 10, 1000, 1, HighAvailabilityTaskAssignor.class);
     }

-    /************ StickyTaskAssignor tests ************/
+    /* StickyTaskAssignor tests */

     @Test(timeout = 120 * 1000)
     public void testStickyTaskAssignorLargePartitionCount() {
@@ -117,7 +117,7 @@ public class StreamsAssignmentScaleTest {
         completeLargeAssignment(1_000, 10, 1000, 1, StickyTaskAssignor.class);
     }

-    /************ FallbackPriorTaskAssignor tests ************/
+    /* FallbackPriorTaskAssignor tests */

     @Test(timeout = 120 * 1000)
     public void testFallbackPriorTaskAssignorLargePartitionCount() {
|
@ -290,7 +290,7 @@ public class VerifiableProducer implements AutoCloseable {
|
|||
}
|
||||
}
|
||||
|
||||
/** Returns a string to publish: ether 'valuePrefix'.'val' or 'val' **/
|
||||
/** Returns a string to publish: ether 'valuePrefix'.'val' or 'val' */
|
||||
public String getValue(long val) {
|
||||
if (this.valuePrefix != null) {
|
||||
return String.format("%d.%d", this.valuePrefix, val);
|
||||
|
|