mirror of https://github.com/apache/kafka.git
KAFKA-16643 Add ModifierOrder checkstyle rule (#15890)
Reviewers: Chia-Ping Tsai <chia7712@gmail.com>
This commit is contained in:
parent 103ff5c0f0
commit 596b945072
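This commit enables Checkstyle's built-in ModifierOrder check and mechanically reorders every declaration that violated it. ModifierOrder enforces the modifier order recommended by the Java Language Specification: public, protected, private, abstract, default, static, final, transient, volatile, synchronized, native, strictfp. As a minimal illustration of what the check accepts and flags (this snippet is not part of the commit; the class and members are hypothetical):

    // Illustration only: not from the Kafka code base.
    public class ModifierOrderExample {
        static final int OK = 1;             // compliant: "static" precedes "final"
        // final static int BAD = 1;         // ModifierOrder would flag this

        public static synchronized void refresh() { }     // compliant
        // synchronized public static void refresh() { }  // flagged: access modifier must come first
    }

Every hunk below follows the same pattern: the old ordering is removed (-) and the JLS ordering is added (+). Java treats modifier order as semantically insignificant, so none of these changes affect behavior.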
@@ -150,6 +150,9 @@
         <!-- Allows the use of the @SuppressWarnings annotation in the code -->
         <module name="SuppressWarningsHolder"/>
+
+        <module name="ModifierOrder"/>
+
     </module>

     <module name="SuppressionFilter">
@@ -37,10 +37,10 @@ import java.util.Map;
  *
  */
 final class ClusterConnectionStates {
-    final static int RECONNECT_BACKOFF_EXP_BASE = 2;
-    final static double RECONNECT_BACKOFF_JITTER = 0.2;
-    final static int CONNECTION_SETUP_TIMEOUT_EXP_BASE = 2;
-    final static double CONNECTION_SETUP_TIMEOUT_JITTER = 0.2;
+    static final int RECONNECT_BACKOFF_EXP_BASE = 2;
+    static final double RECONNECT_BACKOFF_JITTER = 0.2;
+    static final int CONNECTION_SETUP_TIMEOUT_EXP_BASE = 2;
+    static final double CONNECTION_SETUP_TIMEOUT_JITTER = 0.2;
     private final Map<String, NodeConnectionState> nodeState;
     private final Logger log;
     private final HostResolver hostResolver;

@@ -32,7 +32,7 @@ import java.util.stream.Collectors;
  */
 @InterfaceStability.Evolving
 public class CreateTopicsResult {
-    final static int UNKNOWN = -1;
+    static final int UNKNOWN = -1;

     private final Map<String, KafkaFuture<TopicMetadataAndConfig>> futures;
@@ -66,7 +66,7 @@ public class DescribeReplicaLogDirsResult {
         });
     }

-    static public class ReplicaLogDirInfo {
+    public static class ReplicaLogDirInfo {
         // The current log directory of the replica of this partition on the given broker.
         // Null if no replica is not found for this partition on the given broker.
         private final String currentReplicaLogDir;

@@ -28,5 +28,5 @@ import java.util.Set;
  * The API of this class is evolving, see {@link Admin} for details.
  */
 @InterfaceStability.Evolving
-final public class ElectLeadersOptions extends AbstractOptions<ElectLeadersOptions> {
+public final class ElectLeadersOptions extends AbstractOptions<ElectLeadersOptions> {
 }

@@ -34,7 +34,7 @@ import org.apache.kafka.common.internals.KafkaFutureImpl;
  * The API of this class is evolving, see {@link Admin} for details.
  */
 @InterfaceStability.Evolving
-final public class ElectLeadersResult {
+public final class ElectLeadersResult {
     private final KafkaFuture<Map<TopicPartition, Optional<Throwable>>> electionFuture;

     ElectLeadersResult(KafkaFuture<Map<TopicPartition, Optional<Throwable>>> electionFuture) {
@@ -3483,7 +3483,7 @@ public class KafkaAdminClient extends AdminClient {
                 .collect(Collectors.toSet());
     }

-    private final static class ListConsumerGroupsResults {
+    private static final class ListConsumerGroupsResults {
         private final List<Throwable> errors;
         private final HashMap<String, ConsumerGroupListing> listings;
         private final HashSet<Node> remaining;

@@ -34,7 +34,7 @@ public enum TransactionState {
     PREPARE_EPOCH_FENCE("PrepareEpochFence"),
     UNKNOWN("Unknown");

-    private final static Map<String, TransactionState> NAME_TO_ENUM = Arrays.stream(values())
+    private static final Map<String, TransactionState> NAME_TO_ENUM = Arrays.stream(values())
         .collect(Collectors.toMap(state -> state.name, Function.identity()));

     private final String name;

@@ -27,7 +27,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Objects;

-final public class AdminBootstrapAddresses {
+public final class AdminBootstrapAddresses {
     private final boolean usingBootstrapControllers;
     private final List<InetSocketAddress> addresses;
@@ -26,10 +26,10 @@ import java.util.Optional;
  * Note: Any change to this class is considered public and requires a KIP.
  */
 public class ConsumerGroupMetadata {
-    final private String groupId;
-    final private int generationId;
-    final private String memberId;
-    final private Optional<String> groupInstanceId;
+    private final String groupId;
+    private final int generationId;
+    private final String memberId;
+    private final Optional<String> groupInstanceId;

     public ConsumerGroupMetadata(String groupId,
                                  int generationId,

@@ -523,7 +523,7 @@ import static org.apache.kafka.common.utils.Utils.propsToMap;
  */
 public class KafkaConsumer<K, V> implements Consumer<K, V> {

-    private final static ConsumerDelegateCreator CREATOR = new ConsumerDelegateCreator();
+    private static final ConsumerDelegateCreator CREATOR = new ConsumerDelegateCreator();

     private final ConsumerDelegate<K, V> delegate;

@@ -332,7 +332,7 @@ import static org.apache.kafka.common.utils.Utils.propsToMap;
 @InterfaceStability.Evolving
 public class KafkaShareConsumer<K, V> implements ShareConsumer<K, V> {

-    private final static ShareConsumerDelegateCreator CREATOR = new ShareConsumerDelegateCreator();
+    private static final ShareConsumerDelegateCreator CREATOR = new ShareConsumerDelegateCreator();

     private final ShareConsumerDelegate<K, V> delegate;
@@ -85,7 +85,7 @@ public abstract class AbstractStickyAssignor extends AbstractPartitionAssignor {
         }
     }

-    abstract protected MemberData memberData(Subscription subscription);
+    protected abstract MemberData memberData(Subscription subscription);

     @Override
     public Map<String, List<TopicPartition>> assignPartitions(Map<String, List<PartitionInfo>> partitionsPerTopic,

@@ -96,7 +96,7 @@ import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.refreshC
  * This class manages the coordination process with the consumer coordinator.
  */
 public final class ConsumerCoordinator extends AbstractCoordinator {
-    private final static TopicPartitionComparator COMPARATOR = new TopicPartitionComparator();
+    private static final TopicPartitionComparator COMPARATOR = new TopicPartitionComparator();

     private final GroupRebalanceConfig rebalanceConfig;
     private final Logger log;

@@ -118,13 +118,13 @@ public class MembershipManagerImpl implements MembershipManager {
     /**
      * TopicPartition comparator based on topic name and partition id.
      */
-    final static TopicPartitionComparator TOPIC_PARTITION_COMPARATOR = new TopicPartitionComparator();
+    static final TopicPartitionComparator TOPIC_PARTITION_COMPARATOR = new TopicPartitionComparator();

     /**
      * TopicIdPartition comparator based on topic name and partition id (ignoring ID while sorting,
      * as this is sorted mainly for logging purposes).
      */
-    final static TopicIdPartitionComparator TOPIC_ID_PARTITION_COMPARATOR = new TopicIdPartitionComparator();
+    static final TopicIdPartitionComparator TOPIC_ID_PARTITION_COMPARATOR = new TopicIdPartitionComparator();

     /**
      * Group ID of the consumer group the member will be part of, provided when creating the current
@@ -24,8 +24,8 @@ class RequestState {

     private final Logger log;
     protected final String owner;
-    final static int RETRY_BACKOFF_EXP_BASE = 2;
-    final static double RETRY_BACKOFF_JITTER = 0.2;
+    static final int RETRY_BACKOFF_EXP_BASE = 2;
+    static final double RETRY_BACKOFF_JITTER = 0.2;
     protected final ExponentialBackoff exponentialBackoff;
     protected long lastSentMs = -1;
     protected long lastReceivedMs = -1;

@@ -26,7 +26,7 @@ import java.util.Map;

 public final class Utils {

-    final static class PartitionComparator implements Comparator<TopicPartition>, Serializable {
+    static final class PartitionComparator implements Comparator<TopicPartition>, Serializable {
         private static final long serialVersionUID = 1L;
         private final Map<String, List<String>> map;

@@ -46,7 +46,7 @@ public final class Utils {
         }
     }

-    public final static class TopicPartitionComparator implements Comparator<TopicPartition>, Serializable {
+    public static final class TopicPartitionComparator implements Comparator<TopicPartition>, Serializable {
         private static final long serialVersionUID = 1L;

         @Override

@@ -62,7 +62,7 @@ public final class Utils {
         }
     }

-    public final static class TopicIdPartitionComparator implements Comparator<TopicIdPartition>, Serializable {
+    public static final class TopicIdPartitionComparator implements Comparator<TopicIdPartition>, Serializable {
         private static final long serialVersionUID = 1L;

         /**
@@ -45,7 +45,7 @@ public class BuiltInPartitioner {
     private final AtomicReference<StickyPartitionInfo> stickyPartitionInfo = new AtomicReference<>();

     // Visible and used for testing only.
-    static volatile public Supplier<Integer> mockRandom = null;
+    public static volatile Supplier<Integer> mockRandom = null;

     /**
      * BuiltInPartitioner constructor.

@@ -331,7 +331,7 @@ public class BuiltInPartitioner {
     /**
      * The partition load stats for each topic that are used for adaptive partition distribution.
      */
-    private final static class PartitionLoadStats {
+    private static final class PartitionLoadStats {
         public final int[] cumulativeFrequencyTable;
         public final int[] partitionIds;
         public final int length;

@@ -395,7 +395,7 @@ public final class ProducerBatch {
     /**
      * A callback and the associated FutureRecordMetadata argument to pass to it.
      */
-    final private static class Thunk {
+    private static final class Thunk {
         final Callback callback;
         final FutureRecordMetadata future;
@@ -1208,7 +1208,7 @@ public class RecordAccumulator {
     /*
      * Metadata about a record just appended to the record accumulator
      */
-    public final static class RecordAppendResult {
+    public static final class RecordAppendResult {
         public final FutureRecordMetadata future;
         public final boolean batchIsFull;
         public final boolean newBatchCreated;

@@ -1242,7 +1242,7 @@ public class RecordAccumulator {
     /*
      * The set of nodes that have at least one complete record batch in the accumulator
      */
-    public final static class ReadyCheckResult {
+    public static final class ReadyCheckResult {
         public final Set<Node> readyNodes;
         public final long nextReadyCheckDelayMs;
         public final Set<String> unknownLeaderTopics;

@@ -1270,9 +1270,9 @@ public class RecordAccumulator {
      * Node latency stats for each node that are used for adaptive partition distribution
      * Visible for testing
      */
-    public final static class NodeLatencyStats {
-        volatile public long readyTimeMs;  // last time the node had batches ready to send
-        volatile public long drainTimeMs;  // last time the node was able to drain batches
+    public static final class NodeLatencyStats {
+        public volatile long readyTimeMs;  // last time the node had batches ready to send
+        public volatile long drainTimeMs;  // last time the node was able to drain batches

         NodeLatencyStats(long nowMs) {
             readyTimeMs = nowMs;

@@ -30,7 +30,7 @@ import org.apache.kafka.common.metrics.Sensor;

 public class SenderMetricsRegistry {

-    final static String TOPIC_METRIC_GROUP_NAME = "producer-topic-metrics";
+    static final String TOPIC_METRIC_GROUP_NAME = "producer-topic-metrics";

     private final List<MetricNameTemplate> allTemplates;
@@ -480,7 +480,7 @@ public class TransactionManager {
         return producerIdAndEpoch;
     }

-    synchronized public void maybeUpdateProducerIdAndEpoch(TopicPartition topicPartition) {
+    public synchronized void maybeUpdateProducerIdAndEpoch(TopicPartition topicPartition) {
         if (hasFatalError()) {
             log.debug("Ignoring producer ID and epoch update request since the producer is in fatal error state");
             return;

@@ -36,7 +36,7 @@ public enum ConsumerGroupState {
     ASSIGNING("Assigning"),
     RECONCILING("Reconciling");

-    private final static Map<String, ConsumerGroupState> NAME_TO_ENUM = Arrays.stream(values())
+    private static final Map<String, ConsumerGroupState> NAME_TO_ENUM = Arrays.stream(values())
         .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity()));

     private final String name;

@@ -27,7 +27,7 @@ public enum GroupType {
     CONSUMER("Consumer"),
     CLASSIC("Classic");

-    private final static Map<String, GroupType> NAME_TO_ENUM = Arrays.stream(values())
+    private static final Map<String, GroupType> NAME_TO_ENUM = Arrays.stream(values())
         .collect(Collectors.toMap(type -> type.name.toLowerCase(Locale.ROOT), Function.identity()));

     private final String name;

@@ -21,7 +21,7 @@ package org.apache.kafka.common;
  */
 public class KafkaException extends RuntimeException {

-    private final static long serialVersionUID = 1L;
+    private static final long serialVersionUID = 1L;

     public KafkaException(String message, Throwable cause) {
         super(message, cause);
@@ -53,7 +53,7 @@ public abstract class KafkaFuture<T> implements Future<T> {
      * @deprecated Since Kafka 3.0. Use the {@link BaseFunction} functional interface.
      */
     @Deprecated
-    public static abstract class Function<A, B> implements BaseFunction<A, B> { }
+    public abstract static class Function<A, B> implements BaseFunction<A, B> { }

     /**
      * A consumer of two different types of object.

@@ -32,7 +32,7 @@ public enum ShareGroupState {
     DEAD("Dead"),
     EMPTY("Empty");

-    private final static Map<String, ShareGroupState> NAME_TO_ENUM = Arrays.stream(values())
+    private static final Map<String, ShareGroupState> NAME_TO_ENUM = Arrays.stream(values())
         .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity()));

     private final String name;

@@ -121,7 +121,7 @@ public enum AclOperation {
     // Note: we cannot have more than 30 ACL operations without modifying the format used
     // to describe ACL operations in MetadataResponse.

-    private final static HashMap<Byte, AclOperation> CODE_TO_VALUE = new HashMap<>();
+    private static final HashMap<Byte, AclOperation> CODE_TO_VALUE = new HashMap<>();

     static {
         for (AclOperation operation : AclOperation.values()) {

@@ -50,7 +50,7 @@ public enum AclPermissionType {
      */
     ALLOW((byte) 3);

-    private final static HashMap<Byte, AclPermissionType> CODE_TO_VALUE = new HashMap<>();
+    private static final HashMap<Byte, AclPermissionType> CODE_TO_VALUE = new HashMap<>();

     static {
         for (AclPermissionType permissionType : AclPermissionType.values()) {
@@ -112,7 +112,7 @@ public class BrokerSecurityConfigs {
     public static final String SSL_ALLOW_SAN_CHANGES_DOC = "Indicates whether changes to the certificate subject alternative names should be allowed during " +
             "a dynamic reconfiguration of certificates or not.";

-    public final static String SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG = "sasl.mechanism.inter.broker.protocol";
-    public final static String SASL_MECHANISM_INTER_BROKER_PROTOCOL_DOC = "SASL mechanism used for inter-broker communication. Default is GSSAPI.";
+    public static final String SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG = "sasl.mechanism.inter.broker.protocol";
+    public static final String SASL_MECHANISM_INTER_BROKER_PROTOCOL_DOC = "SASL mechanism used for inter-broker communication. Default is GSSAPI.";

 }

@@ -22,7 +22,7 @@ package org.apache.kafka.common.errors;
  */
 public class TransactionAbortedException extends ApiException {

-    private final static long serialVersionUID = 1L;
+    private static final long serialVersionUID = 1L;

     public TransactionAbortedException(String message, Throwable cause) {
         super(message, cause);

@@ -27,7 +27,7 @@ import org.apache.kafka.common.utils.Exit;
  */
 public class FatalExitError extends Error {

-    private final static long serialVersionUID = 1L;
+    private static final long serialVersionUID = 1L;

     private final int statusCode;
@@ -25,7 +25,7 @@ import java.util.Objects;
  * The schema for a compound record definition
  */
 public class Schema extends Type {
-    private final static Object[] NO_VALUES = new Object[0];
+    private static final Object[] NO_VALUES = new Object[0];

     private final BoundField[] fields;
     private final Map<String, BoundField> fieldsByName;

@@ -229,7 +229,7 @@ public class Schema extends Type {
     /**
      * Override one or more of the visit methods with the desired logic.
      */
-    public static abstract class Visitor {
+    public abstract static class Visitor {
         public void visit(Schema schema) {}
         public void visit(Type field) {}
     }

@@ -82,7 +82,7 @@ public abstract class Type {
     /**
      * A Type that can return its description for documentation purposes.
      */
-    public static abstract class DocumentedType extends Type {
+    public abstract static class DocumentedType extends Type {

         /**
          * Short name of the type to identify it in documentation;

@@ -354,7 +354,7 @@ public class MemoryRecords extends AbstractRecords {
         return buffer.hashCode();
     }

-    public static abstract class RecordFilter {
+    public abstract static class RecordFilter {
         public final long currentTime;
         public final long deleteRetentionMs;
@@ -51,7 +51,7 @@ public abstract class AbstractControlRequest extends AbstractRequest {

     public static final long UNKNOWN_BROKER_EPOCH = -1L;

-    public static abstract class Builder<T extends AbstractRequest> extends AbstractRequest.Builder<T> {
+    public abstract static class Builder<T extends AbstractRequest> extends AbstractRequest.Builder<T> {
         protected final int controllerId;
         protected final int controllerEpoch;
         protected final long brokerEpoch;

@@ -29,7 +29,7 @@ import java.util.Map;

 public abstract class AbstractRequest implements AbstractRequestResponse {

-    public static abstract class Builder<T extends AbstractRequest> {
+    public abstract static class Builder<T extends AbstractRequest> {
         private final ApiKeys apiKey;
         private final short oldestAllowedVersion;
         private final short latestAllowedVersion;

@@ -82,7 +82,7 @@ public class DescribeLogDirsResponse extends AbstractResponse {
      * class {@link org.apache.kafka.clients.admin.LogDirDescription}.
      */
     @Deprecated
-    static public class LogDirInfo {
+    public static class LogDirInfo {
         public final Errors error;
         public final Map<TopicPartition, ReplicaInfo> replicaInfos;

@@ -110,7 +110,7 @@ public class DescribeLogDirsResponse extends AbstractResponse {
      * class {@link org.apache.kafka.clients.admin.ReplicaInfo}.
      */
     @Deprecated
-    static public class ReplicaInfo {
+    public static class ReplicaInfo {

         public final long size;
         public final long offsetLag;
@@ -28,7 +28,7 @@ import java.util.Collections;
 import java.util.Optional;
 import java.util.function.UnaryOperator;

-final public class FetchSnapshotRequest extends AbstractRequest {
+public final class FetchSnapshotRequest extends AbstractRequest {
     private final FetchSnapshotRequestData data;

     public FetchSnapshotRequest(FetchSnapshotRequestData data, short version) {

@@ -29,7 +29,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.function.UnaryOperator;

-final public class FetchSnapshotResponse extends AbstractResponse {
+public final class FetchSnapshotResponse extends AbstractResponse {
     private final FetchSnapshotResponseData data;

     public FetchSnapshotResponse(FetchSnapshotResponseData data) {

@@ -30,7 +30,7 @@ import java.util.Objects;
  * The header for a request in the Kafka protocol
  */
 public class RequestHeader implements AbstractRequestResponse {
-    private final static int SIZE_NOT_INITIALIZED = -1;
+    private static final int SIZE_NOT_INITIALIZED = -1;
     private final RequestHeaderData data;
     private final short headerVersion;
     private int size = SIZE_NOT_INITIALIZED;

@@ -27,7 +27,7 @@ import java.util.Objects;
  * A response header in the kafka protocol.
  */
 public class ResponseHeader implements AbstractRequestResponse {
-    private final static int SIZE_NOT_INITIALIZED = -1;
+    private static final int SIZE_NOT_INITIALIZED = -1;
     private final ResponseHeaderData data;
     private final short headerVersion;
     private int size = SIZE_NOT_INITIALIZED;
@@ -69,13 +69,13 @@ public enum PatternType {
      */
     PREFIXED((byte) 4);

-    private final static Map<Byte, PatternType> CODE_TO_VALUE =
+    private static final Map<Byte, PatternType> CODE_TO_VALUE =
             Collections.unmodifiableMap(
                     Arrays.stream(PatternType.values())
                             .collect(Collectors.toMap(PatternType::code, Function.identity()))
             );

-    private final static Map<String, PatternType> NAME_TO_VALUE =
+    private static final Map<String, PatternType> NAME_TO_VALUE =
             Collections.unmodifiableMap(
                     Arrays.stream(PatternType.values())
                             .collect(Collectors.toMap(PatternType::name, Function.identity()))

@@ -34,12 +34,12 @@ public class Resource {
     /**
      * The name of the CLUSTER resource.
      */
-    public final static String CLUSTER_NAME = "kafka-cluster";
+    public static final String CLUSTER_NAME = "kafka-cluster";

     /**
      * A resource representing the whole cluster.
      */
-    public final static Resource CLUSTER = new Resource(ResourceType.CLUSTER, CLUSTER_NAME);
+    public static final Resource CLUSTER = new Resource(ResourceType.CLUSTER, CLUSTER_NAME);

     /**
      * Create an instance of this class with the provided parameters.

@@ -70,7 +70,7 @@ public enum ResourceType {
      */
     USER((byte) 7);

-    private final static HashMap<Byte, ResourceType> CODE_TO_VALUE = new HashMap<>();
+    private static final HashMap<Byte, ResourceType> CODE_TO_VALUE = new HashMap<>();

     static {
         for (ResourceType resourceType : ResourceType.values()) {
@@ -42,7 +42,7 @@ import static java.util.Objects.requireNonNull;
  */
 public class KafkaPrincipal implements Principal {
     public static final String USER_TYPE = "User";
-    public final static KafkaPrincipal ANONYMOUS = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ANONYMOUS");
+    public static final KafkaPrincipal ANONYMOUS = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ANONYMOUS");

     private final String principalType;
     private final String name;

@@ -33,7 +33,7 @@ import javax.security.sasl.SaslException;
  */
 public class ScramMessages {

-    static abstract class AbstractScramMessage {
+    abstract static class AbstractScramMessage {

         static final String ALPHA = "[A-Za-z]+";
         static final String VALUE_SAFE = "[\\x01-\\x7F&&[^=,]]+";

@@ -112,9 +112,9 @@ class CommonNameLoggingTrustManagerFactoryWrapper {
      */
     static class CommonNameLoggingTrustManager implements X509TrustManager {

-        final private X509TrustManager origTm;
+        private final X509TrustManager origTm;
         final int nrOfRememberedBadCerts;
-        final private LinkedHashMap<ByteBuffer, String> previouslyRejectedClientCertChains;
+        private final LinkedHashMap<ByteBuffer, String> previouslyRejectedClientCertChains;

         public CommonNameLoggingTrustManager(X509TrustManager originalTrustManager, int nrOfRememberedBadCerts) {
             this.origTm = originalTrustManager;
@@ -28,9 +28,9 @@ import java.util.UUID;
  */
 public class Serdes {

-    static public class WrapperSerde<T> implements Serde<T> {
-        final private Serializer<T> serializer;
-        final private Deserializer<T> deserializer;
+    public static class WrapperSerde<T> implements Serde<T> {
+        private final Serializer<T> serializer;
+        private final Deserializer<T> deserializer;

         public WrapperSerde(Serializer<T> serializer, Deserializer<T> deserializer) {
             this.serializer = serializer;

@@ -60,81 +60,81 @@ public class Serdes {
         }
     }

-    static public final class VoidSerde extends WrapperSerde<Void> {
+    public static final class VoidSerde extends WrapperSerde<Void> {
         public VoidSerde() {
             super(new VoidSerializer(), new VoidDeserializer());
         }
     }

-    static public final class LongSerde extends WrapperSerde<Long> {
+    public static final class LongSerde extends WrapperSerde<Long> {
         public LongSerde() {
             super(new LongSerializer(), new LongDeserializer());
         }
     }

-    static public final class IntegerSerde extends WrapperSerde<Integer> {
+    public static final class IntegerSerde extends WrapperSerde<Integer> {
         public IntegerSerde() {
             super(new IntegerSerializer(), new IntegerDeserializer());
         }
     }

-    static public final class ShortSerde extends WrapperSerde<Short> {
+    public static final class ShortSerde extends WrapperSerde<Short> {
         public ShortSerde() {
             super(new ShortSerializer(), new ShortDeserializer());
         }
     }

-    static public final class FloatSerde extends WrapperSerde<Float> {
+    public static final class FloatSerde extends WrapperSerde<Float> {
         public FloatSerde() {
             super(new FloatSerializer(), new FloatDeserializer());
         }
     }

-    static public final class DoubleSerde extends WrapperSerde<Double> {
+    public static final class DoubleSerde extends WrapperSerde<Double> {
         public DoubleSerde() {
             super(new DoubleSerializer(), new DoubleDeserializer());
         }
     }

-    static public final class StringSerde extends WrapperSerde<String> {
+    public static final class StringSerde extends WrapperSerde<String> {
         public StringSerde() {
             super(new StringSerializer(), new StringDeserializer());
         }
     }

-    static public final class ByteBufferSerde extends WrapperSerde<ByteBuffer> {
+    public static final class ByteBufferSerde extends WrapperSerde<ByteBuffer> {
         public ByteBufferSerde() {
             super(new ByteBufferSerializer(), new ByteBufferDeserializer());
         }
     }

-    static public final class BytesSerde extends WrapperSerde<Bytes> {
+    public static final class BytesSerde extends WrapperSerde<Bytes> {
         public BytesSerde() {
             super(new BytesSerializer(), new BytesDeserializer());
         }
     }

-    static public final class ByteArraySerde extends WrapperSerde<byte[]> {
+    public static final class ByteArraySerde extends WrapperSerde<byte[]> {
         public ByteArraySerde() {
             super(new ByteArraySerializer(), new ByteArrayDeserializer());
         }
     }

-    static public final class UUIDSerde extends WrapperSerde<UUID> {
+    public static final class UUIDSerde extends WrapperSerde<UUID> {
         public UUIDSerde() {
             super(new UUIDSerializer(), new UUIDDeserializer());
         }
     }

-    static public final class BooleanSerde extends WrapperSerde<Boolean> {
+    public static final class BooleanSerde extends WrapperSerde<Boolean> {
         public BooleanSerde() {
             super(new BooleanSerializer(), new BooleanDeserializer());
         }
     }

-    static public final class ListSerde<Inner> extends WrapperSerde<List<Inner>> {
+    public static final class ListSerde<Inner> extends WrapperSerde<List<Inner>> {

-        final static int NULL_ENTRY_VALUE = -1;
+        static final int NULL_ENTRY_VALUE = -1;

         enum SerializationStrategy {
             CONSTANT_SIZE,

@@ -154,7 +154,7 @@ public class Serdes {
     }

     @SuppressWarnings("unchecked")
-    static public <T> Serde<T> serdeFrom(Class<T> type) {
+    public static <T> Serde<T> serdeFrom(Class<T> type) {
         if (String.class.isAssignableFrom(type)) {
             return (Serde<T>) String();
         }

@@ -210,7 +210,7 @@ public class Serdes {
      * @param serializer must not be null.
      * @param deserializer must not be null.
      */
-    static public <T> Serde<T> serdeFrom(final Serializer<T> serializer, final Deserializer<T> deserializer) {
+    public static <T> Serde<T> serdeFrom(final Serializer<T> serializer, final Deserializer<T> deserializer) {
         if (serializer == null) {
             throw new IllegalArgumentException("serializer must not be null");
         }

@@ -224,91 +224,91 @@ public class Serdes {
     /**
      * A serde for nullable {@code Long} type.
      */
-    static public Serde<Long> Long() {
+    public static Serde<Long> Long() {
         return new LongSerde();
     }

     /**
      * A serde for nullable {@code Integer} type.
      */
-    static public Serde<Integer> Integer() {
+    public static Serde<Integer> Integer() {
         return new IntegerSerde();
     }

     /**
      * A serde for nullable {@code Short} type.
      */
-    static public Serde<Short> Short() {
+    public static Serde<Short> Short() {
         return new ShortSerde();
     }

     /**
      * A serde for nullable {@code Float} type.
      */
-    static public Serde<Float> Float() {
+    public static Serde<Float> Float() {
         return new FloatSerde();
     }

     /**
      * A serde for nullable {@code Double} type.
      */
-    static public Serde<Double> Double() {
+    public static Serde<Double> Double() {
         return new DoubleSerde();
     }

     /**
      * A serde for nullable {@code String} type.
      */
-    static public Serde<String> String() {
+    public static Serde<String> String() {
         return new StringSerde();
     }

     /**
      * A serde for nullable {@code ByteBuffer} type.
      */
-    static public Serde<ByteBuffer> ByteBuffer() {
+    public static Serde<ByteBuffer> ByteBuffer() {
         return new ByteBufferSerde();
     }

     /**
      * A serde for nullable {@code Bytes} type.
      */
-    static public Serde<Bytes> Bytes() {
+    public static Serde<Bytes> Bytes() {
         return new BytesSerde();
     }

     /**
      * A serde for nullable {@code UUID} type
      */
-    static public Serde<UUID> UUID() {
+    public static Serde<UUID> UUID() {
         return new UUIDSerde();
     }

     /**
      * A serde for nullable {@code Boolean} type.
      */
-    static public Serde<Boolean> Boolean() {
+    public static Serde<Boolean> Boolean() {
         return new BooleanSerde();
     }

     /**
      * A serde for nullable {@code byte[]} type.
      */
-    static public Serde<byte[]> ByteArray() {
+    public static Serde<byte[]> ByteArray() {
         return new ByteArraySerde();
     }

     /**
      * A serde for {@code Void} type.
      */
-    static public Serde<Void> Void() {
+    public static Serde<Void> Void() {
         return new VoidSerde();
     }

     /*
      * A serde for {@code List} type
      */
-    static public <L extends List<Inner>, Inner> Serde<List<Inner>> ListSerde(Class<L> listClass, Serde<Inner> innerSerde) {
+    public static <L extends List<Inner>, Inner> Serde<List<Inner>> ListSerde(Class<L> listClass, Serde<Inner> innerSerde) {
         return new ListSerde<>(listClass, innerSerde);
     }
@@ -65,7 +65,7 @@ public enum ClientTelemetryState {
      */
     TERMINATED;

-    private final static Map<ClientTelemetryState, List<ClientTelemetryState>> VALID_NEXT_STATES = new EnumMap<>(ClientTelemetryState.class);
+    private static final Map<ClientTelemetryState, List<ClientTelemetryState>> VALID_NEXT_STATES = new EnumMap<>(ClientTelemetryState.class);

     static {
         /*

@@ -44,11 +44,11 @@ import java.util.function.Predicate;

 public class ClientTelemetryUtils {

-    private final static Logger log = LoggerFactory.getLogger(ClientTelemetryUtils.class);
+    private static final Logger log = LoggerFactory.getLogger(ClientTelemetryUtils.class);

-    public final static Predicate<? super MetricKeyable> SELECTOR_NO_METRICS = k -> false;
+    public static final Predicate<? super MetricKeyable> SELECTOR_NO_METRICS = k -> false;

-    public final static Predicate<? super MetricKeyable> SELECTOR_ALL_METRICS = k -> true;
+    public static final Predicate<? super MetricKeyable> SELECTOR_ALL_METRICS = k -> true;

     /**
      * Examine the response data and handle different error code accordingly:

@@ -36,7 +36,7 @@ public class TelemetryMetricNamingConvention {
     private static final String TAG_JOINER = "_";

     // remove metrics as it is redundant for telemetry metrics naming convention
-    private final static Pattern GROUP_PATTERN = Pattern.compile("\\.(metrics)");
+    private static final Pattern GROUP_PATTERN = Pattern.compile("\\.(metrics)");

     public static MetricNamingStrategy<MetricName> getClientTelemetryMetricNamingStrategy(String prefix) {
         Objects.requireNonNull(prefix, "prefix cannot be null");

@@ -169,7 +169,7 @@ public class Bytes implements Comparable<Bytes> {
     /**
      * A byte array comparator based on lexicograpic ordering.
      */
-    public final static ByteArrayComparator BYTES_LEXICO_COMPARATOR = new LexicographicByteArrayComparator();
+    public static final ByteArrayComparator BYTES_LEXICO_COMPARATOR = new LexicographicByteArrayComparator();

     public interface ByteArrayComparator extends Comparator<byte[]>, Serializable {
@@ -45,7 +45,7 @@ public class ChildFirstClassLoader extends URLClassLoader {
         super(classpathToURLs(classPath), parent);
     }

-    static private URL[] classpathToURLs(String classPath) {
+    private static URL[] classpathToURLs(String classPath) {
         ArrayList<URL> urls = new ArrayList<>();
         for (String path : classPath.split(File.pathSeparator)) {
             if (path == null || path.trim().isEmpty())

@@ -51,9 +51,9 @@ public class Exit {
         throw new IllegalStateException("Exit called after resetting procedures; possible race condition present in test");
     };

-    private volatile static Procedure exitProcedure = DEFAULT_EXIT_PROCEDURE;
-    private volatile static Procedure haltProcedure = DEFAULT_HALT_PROCEDURE;
-    private volatile static ShutdownHookAdder shutdownHookAdder = DEFAULT_SHUTDOWN_HOOK_ADDER;
+    private static volatile Procedure exitProcedure = DEFAULT_EXIT_PROCEDURE;
+    private static volatile Procedure haltProcedure = DEFAULT_HALT_PROCEDURE;
+    private static volatile ShutdownHookAdder shutdownHookAdder = DEFAULT_SHUTDOWN_HOOK_ADDER;

     public static void exit(int statusCode) {
         exit(statusCode, null);
@@ -297,7 +297,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * remove on the iterator itself, of course.)
      */
     @Override
-    final public Iterator<E> iterator() {
+    public final Iterator<E> iterator() {
         return listIterator(0);
     }

@@ -345,7 +345,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * @param key The element to match.
      * @return The matching element, or null if there were none.
      */
-    final public E find(E key) {
+    public final E find(E key) {
         int index = findIndexOfEqualElement(key);
         if (index == INVALID_INDEX) {
             return null;

@@ -359,7 +359,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * Returns the number of elements in the set.
      */
     @Override
-    final public int size() {
+    public final int size() {
         return size;
     }

@@ -370,7 +370,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * @param key The object to try to match.
      */
     @Override
-    final public boolean contains(Object key) {
+    public final boolean contains(Object key) {
         return findIndexOfEqualElement(key) != INVALID_INDEX;
     }

@@ -390,7 +390,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * false if it was not, because there was an existing equal element.
      */
     @Override
-    final public boolean add(E newElement) {
+    public final boolean add(E newElement) {
         if (newElement == null) {
             return false;
         }

@@ -409,7 +409,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
         return false;
     }

-    final public void mustAdd(E newElement) {
+    public final void mustAdd(E newElement) {
         if (!add(newElement)) {
             throw new RuntimeException("Unable to add " + newElement);
         }

@@ -462,7 +462,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * @return True if an element was removed; false otherwise.
      */
     @Override
-    final public boolean remove(Object key) {
+    public final boolean remove(Object key) {
         int slot = findElementToRemove(key);
         if (slot == INVALID_INDEX) {
             return false;

@@ -565,7 +565,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * Removes all of the elements from this set.
      */
     @Override
-    final public void clear() {
+    public final void clear() {
         clear(elements.length);
     }

@@ -573,7 +573,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * Moves an element which is already in the collection so that it comes last
      * in iteration order.
      */
-    final public void moveToEnd(E element) {
+    public final void moveToEnd(E element) {
         if (element.prev() == INVALID_INDEX || element.next() == INVALID_INDEX) {
             throw new RuntimeException("Element " + element + " is not in the collection.");
         }

@@ -589,7 +589,7 @@ public class ImplicitLinkedHashCollection<E extends ImplicitLinkedHashCollection
      * Removes all of the elements from this set, and resets the set capacity
      * based on the provided expected number of elements.
      */
-    final public void clear(int expectedNumElements) {
+    public final void clear(int expectedNumElements) {
         if (expectedNumElements == 0) {
             // Optimize away object allocations for empty sets.
             this.head = HeadElement.EMPTY;

@@ -119,7 +119,7 @@ public class ImplicitLinkedHashMultiCollection<E extends ImplicitLinkedHashColle
      *
      * @return All of the matching elements.
      */
-    final public List<E> findAll(E key) {
+    public final List<E> findAll(E key) {
         if (key == null || size() == 0) {
             return Collections.emptyList();
         }
@@ -55,7 +55,7 @@ public class LogContext {
         return logPrefix;
     }

-    private static abstract class AbstractKafkaLogger implements Logger {
+    private abstract static class AbstractKafkaLogger implements Logger {
         private final String prefix;

         protected AbstractKafkaLogger(final String prefix) {

@@ -103,7 +103,7 @@ public class PureJavaCrc32C implements Checksum {
     }

     @Override
-    final public void update(int b) {
+    public final void update(int b) {
         crc = (crc >>> 8) ^ T[T8_0_START + ((crc ^ b) & 0xff)];
     }

@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
 * <code>Shell</code> can be used to run unix commands like <code>du</code> or
 * <code>df</code>.
 */
-abstract public class Shell {
+public abstract class Shell {

    private static final Logger LOG = LoggerFactory.getLogger(Shell.class);
@@ -270,7 +270,7 @@ public class MockAdminClient extends AdminClient {
        addTopic(internal, name, partitions, configs, true);
    }

-    synchronized public void addTopic(boolean internal,
+    public synchronized void addTopic(boolean internal,
                                      String name,
                                      List<TopicPartitionInfo> partitions,
                                      Map<String, String> configs,

@@ -306,7 +306,7 @@ public class MockAdminClient extends AdminClient {
        allTopics.put(name, new TopicMetadata(topicId, internal, partitions, logDirs, configs));
    }

-    synchronized public void markTopicForDeletion(final String name) {
+    public synchronized void markTopicForDeletion(final String name) {
        if (!allTopics.containsKey(name)) {
            throw new IllegalArgumentException(String.format("Topic %s did not exist.", name));
        }

@@ -314,12 +314,12 @@ public class MockAdminClient extends AdminClient {
        allTopics.get(name).markedForDeletion = true;
    }

-    synchronized public void timeoutNextRequest(int numberOfRequest) {
+    public synchronized void timeoutNextRequest(int numberOfRequest) {
        timeoutNextRequests = numberOfRequest;
    }

    @Override
-    synchronized public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
+    public synchronized DescribeClusterResult describeCluster(DescribeClusterOptions options) {
        KafkaFutureImpl<Collection<Node>> nodesFuture = new KafkaFutureImpl<>();
        KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
        KafkaFutureImpl<String> brokerIdFuture = new KafkaFutureImpl<>();

@@ -342,7 +342,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
+    public synchronized CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
        Map<String, KafkaFuture<CreateTopicsResult.TopicMetadataAndConfig>> createTopicResult = new HashMap<>();

        if (timeoutNextRequests > 0) {

@@ -416,7 +416,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public ListTopicsResult listTopics(ListTopicsOptions options) {
+    public synchronized ListTopicsResult listTopics(ListTopicsOptions options) {
        Map<String, TopicListing> topicListings = new HashMap<>();

        if (timeoutNextRequests > 0) {

@@ -442,7 +442,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) {
+    public synchronized DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) {
        if (topics instanceof TopicIdCollection)
            return DescribeTopicsResult.ofTopicIds(new HashMap<>(handleDescribeTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options)));
        else if (topics instanceof TopicNameCollection)

@@ -491,7 +491,7 @@ public class MockAdminClient extends AdminClient {
        return topicDescriptions;
    }

-    synchronized public Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsUsingIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {
+    public synchronized Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsUsingIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {

        Map<Uuid, KafkaFuture<TopicDescription>> topicDescriptions = new HashMap<>();

@@ -534,7 +534,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) {
+    public synchronized DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) {
        DeleteTopicsResult result;
        if (topics instanceof TopicIdCollection)
            result = DeleteTopicsResult.ofTopicIds(new HashMap<>(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options)));

@@ -605,12 +605,12 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions, CreatePartitionsOptions options) {
+    public synchronized CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions, CreatePartitionsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete, DeleteRecordsOptions options) {
+    public synchronized DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete, DeleteRecordsOptions options) {
        Map<TopicPartition, KafkaFuture<DeletedRecords>> deletedRecordsResult = new HashMap<>();
        if (recordsToDelete.isEmpty()) {
            return new DeleteRecordsResult(deletedRecordsResult);

@@ -620,7 +620,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options) {
+    public synchronized CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options) {
        KafkaFutureImpl<DelegationToken> future = new KafkaFutureImpl<>();

        for (KafkaPrincipal renewer : options.renewers()) {

@@ -640,7 +640,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options) {
+    public synchronized RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options) {
        KafkaFutureImpl<Long> future = new KafkaFutureImpl<>();

        boolean tokenFound = false;

@@ -662,7 +662,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options) {
+    public synchronized ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options) {
        KafkaFutureImpl<Long> future = new KafkaFutureImpl<>();

        long expiryTimestamp = options.expiryTimePeriodMs();

@@ -688,7 +688,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options) {
+    public synchronized DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options) {
        KafkaFutureImpl<List<DelegationToken>> future = new KafkaFutureImpl<>();

        if (options.owners().isEmpty()) {

@@ -707,17 +707,17 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, DescribeConsumerGroupsOptions options) {
+    public synchronized DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds, DescribeConsumerGroupsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
+    public synchronized ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options) {
+    public synchronized ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options) {
        // ignoring the groups and assume one test would only work on one group only
        if (groupSpecs.size() != 1)
            throw new UnsupportedOperationException("Not implemented yet");

@@ -732,17 +732,17 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options) {
+    public synchronized DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options) {
+    public synchronized DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public ElectLeadersResult electLeaders(
+    public synchronized ElectLeadersResult electLeaders(
            ElectionType electionType,
            Set<TopicPartition> partitions,
            ElectLeadersOptions options) {

@@ -750,27 +750,27 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options) {
+    public synchronized RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
+    public synchronized CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options) {
+    public synchronized DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
+    public synchronized DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
+    public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {

        if (timeoutNextRequests > 0) {
            Map<ConfigResource, KafkaFuture<Config>> configs = new HashMap<>();

@@ -797,7 +797,7 @@ public class MockAdminClient extends AdminClient {
        return new DescribeConfigsResult(results);
    }

-    synchronized private Config getResourceDescription(ConfigResource resource) {
+    private synchronized Config getResourceDescription(ConfigResource resource) {
        switch (resource.type()) {
            case BROKER: {
                int brokerId = Integer.parseInt(resource.name());

@@ -839,12 +839,12 @@ public class MockAdminClient extends AdminClient {

    @Override
    @Deprecated
-    synchronized public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, AlterConfigsOptions options) {
+    public synchronized AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, AlterConfigsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
-    synchronized public AlterConfigsResult incrementalAlterConfigs(
+    public synchronized AlterConfigsResult incrementalAlterConfigs(
            Map<ConfigResource, Collection<AlterConfigOp>> configs,
            AlterConfigsOptions options) {
        Map<ConfigResource, KafkaFuture<Void>> futures = new HashMap<>();

@@ -864,7 +864,7 @@ public class MockAdminClient extends AdminClient {
        return new AlterConfigsResult(futures);
    }

-    synchronized private Throwable handleIncrementalResourceAlteration(
+    private synchronized Throwable handleIncrementalResourceAlteration(
            ConfigResource resource, Collection<AlterConfigOp> ops) {
        switch (resource.type()) {
            case BROKER: {

@@ -951,7 +951,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public AlterReplicaLogDirsResult alterReplicaLogDirs(
+    public synchronized AlterReplicaLogDirsResult alterReplicaLogDirs(
            Map<TopicPartitionReplica, String> replicaAssignment,
            AlterReplicaLogDirsOptions options) {
        Map<TopicPartitionReplica, KafkaFuture<Void>> results = new HashMap<>();

@@ -984,7 +984,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers,
+    public synchronized DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers,
                                                               DescribeLogDirsOptions options) {
        Map<Integer, Map<String, LogDirDescription>> unwrappedResults = new HashMap<>();

@@ -1026,7 +1026,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public DescribeReplicaLogDirsResult describeReplicaLogDirs(
+    public synchronized DescribeReplicaLogDirsResult describeReplicaLogDirs(
            Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
        Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> results = new HashMap<>();
        for (TopicPartitionReplica replica : replicas) {

@@ -1065,7 +1065,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public AlterPartitionReassignmentsResult alterPartitionReassignments(
+    public synchronized AlterPartitionReassignmentsResult alterPartitionReassignments(
            Map<TopicPartition, Optional<NewPartitionReassignment>> newReassignments,
            AlterPartitionReassignmentsOptions options) {
        Map<TopicPartition, KafkaFuture<Void>> futures = new HashMap<>();

@@ -1092,7 +1092,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public ListPartitionReassignmentsResult listPartitionReassignments(
+    public synchronized ListPartitionReassignmentsResult listPartitionReassignments(
            Optional<Set<TopicPartition>> partitions,
            ListPartitionReassignmentsOptions options) {
        Map<TopicPartition, PartitionReassignment> map = new HashMap<>();

@@ -1106,7 +1106,7 @@ public class MockAdminClient extends AdminClient {
        return new ListPartitionReassignmentsResult(KafkaFutureImpl.completedFuture(map));
    }

-    synchronized private PartitionReassignment findPartitionReassignment(TopicPartition partition) {
+    private synchronized PartitionReassignment findPartitionReassignment(TopicPartition partition) {
        NewPartitionReassignment reassignment = reassignments.get(partition);
        if (reassignment == null) {
            return null;

@@ -1135,12 +1135,12 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options) {
+    public synchronized AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options) {
        throw new UnsupportedOperationException("Not implement yet");
    }

    @Override
-    synchronized public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) {
+    public synchronized ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) {
        Map<TopicPartition, KafkaFuture<ListOffsetsResult.ListOffsetsResultInfo>> futures = new HashMap<>();

        for (Map.Entry<TopicPartition, OffsetSpec> entry : topicPartitionOffsets.entrySet()) {

@@ -1317,7 +1317,7 @@ public class MockAdminClient extends AdminClient {
    }

    @Override
-    synchronized public void close(Duration timeout) {}
+    public synchronized void close(Duration timeout) {}

    public synchronized void updateBeginningOffsets(Map<TopicPartition, Long> newOffsets) {
        beginningOffsets.putAll(newOffsets);

@@ -1331,7 +1331,7 @@ public class MockAdminClient extends AdminClient {
        committedOffsets.putAll(newOffsets);
    }

-    private final static class TopicMetadata {
+    private static final class TopicMetadata {
        final Uuid topicId;
        final boolean isInternalTopic;
        final List<TopicPartitionInfo> partitions;

@@ -1356,7 +1356,7 @@ public class MockAdminClient extends AdminClient {
        }
    }

-    synchronized public void setMockMetrics(MetricName name, Metric metric) {
+    public synchronized void setMockMetrics(MetricName name, Metric metric) {
        mockMetrics.put(name, metric);
    }

@@ -1404,11 +1404,11 @@
|
|||
}
|
||||
|
||||
@Override
|
||||
synchronized public Map<MetricName, ? extends Metric> metrics() {
|
||||
public synchronized Map<MetricName, ? extends Metric> metrics() {
|
||||
return mockMetrics;
|
||||
}
|
||||
|
||||
synchronized public void setFetchesRemainingUntilVisible(String topicName, int fetchesRemainingUntilVisible) {
|
||||
public synchronized void setFetchesRemainingUntilVisible(String topicName, int fetchesRemainingUntilVisible) {
|
||||
TopicMetadata metadata = allTopics.get(topicName);
|
||||
if (metadata == null) {
|
||||
throw new RuntimeException("No such topic as " + topicName);
|
||||
|
@ -1416,11 +1416,11 @@ public class MockAdminClient extends AdminClient {
|
|||
metadata.fetchesRemainingUntilVisible = fetchesRemainingUntilVisible;
|
||||
}
|
||||
|
||||
synchronized public List<Node> brokers() {
|
||||
public synchronized List<Node> brokers() {
|
||||
return new ArrayList<>(brokers);
|
||||
}
|
||||
|
||||
synchronized public Node broker(int index) {
|
||||
public synchronized Node broker(int index) {
|
||||
return brokers.get(index);
|
||||
}
|
||||
}
|
||||
|
|
|
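Note: every hunk in this commit is a pure modifier reordering. Java attaches no semantics to modifier order, so `synchronized public` and `public synchronized` compile to identical bytecode; the new ModifierOrder rule only enforces the order recommended by the Java Language Specification (visibility first, then abstract, default, static, final, transient, volatile, synchronized, native, strictfp). A minimal, hypothetical sketch of what the rule flags and accepts (class and field names are illustrative, not from the patch):

public class ModifierOrderExample {
    final static String FLAGGED = "bad order";    // flagged: 'static' must come before 'final'
    static final String ACCEPTED = "JLS order";   // accepted: static, then final
}
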
@@ -58,10 +58,10 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 
 public class CompletedFetchTest {
 
-    private final static String TOPIC_NAME = "test";
-    private final static TopicPartition TP = new TopicPartition(TOPIC_NAME, 0);
-    private final static long PRODUCER_ID = 1000L;
-    private final static short PRODUCER_EPOCH = 0;
+    private static final String TOPIC_NAME = "test";
+    private static final TopicPartition TP = new TopicPartition(TOPIC_NAME, 0);
+    private static final long PRODUCER_ID = 1000L;
+    private static final short PRODUCER_EPOCH = 0;
 
     @Test
     public void testSimple() {
@@ -76,8 +76,8 @@ import static org.mockito.Mockito.when;
 @MockitoSettings(strictness = Strictness.STRICT_STUBS)
 public class FetchCollectorTest {
 
-    private final static int DEFAULT_RECORD_COUNT = 10;
-    private final static int DEFAULT_MAX_POLL_RECORDS = ConsumerConfig.DEFAULT_MAX_POLL_RECORDS;
+    private static final int DEFAULT_RECORD_COUNT = 10;
+    private static final int DEFAULT_MAX_POLL_RECORDS = ConsumerConfig.DEFAULT_MAX_POLL_RECORDS;
     private final Time time = new MockTime(0, 0, 0);
     private final TopicPartition topicAPartition0 = new TopicPartition("topic-a", 0);
     private final TopicPartition topicAPartition1 = new TopicPartition("topic-a", 1);
@@ -38,8 +38,8 @@ public class FetchMetricsManagerTest {
     private static final double EPSILON = 0.0001;
 
     private final Time time = new MockTime(1, 0, 0);
-    private final static String TOPIC_NAME = "test";
-    private final static TopicPartition TP = new TopicPartition(TOPIC_NAME, 0);
+    private static final String TOPIC_NAME = "test";
+    private static final TopicPartition TP = new TopicPartition(TOPIC_NAME, 0);
 
     private Metrics metrics;
     private FetchMetricsRegistry metricsRegistry;
@@ -28,7 +28,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TimedRequestStateTest {
 
-    private final static long DEFAULT_TIMEOUT_MS = 30000;
+    private static final long DEFAULT_TIMEOUT_MS = 30000;
     private final Time time = new MockTime();
 
     @Test
@@ -24,10 +24,10 @@ import org.apache.kafka.common.TopicPartition;
 import org.junit.jupiter.api.Test;
 
 public class TopicPartitionComparatorTest {
-    private final static TopicPartition TP1 = new TopicPartition("t1", 0);
-    private final static TopicPartition TP1_COPY = new TopicPartition("t1", 0);
-    private final static TopicPartition TP2 = new TopicPartition("t1", 1);
-    private final static TopicPartition TP3 = new TopicPartition("t11", 1);
+    private static final TopicPartition TP1 = new TopicPartition("t1", 0);
+    private static final TopicPartition TP1_COPY = new TopicPartition("t1", 0);
+    private static final TopicPartition TP2 = new TopicPartition("t1", 1);
+    private static final TopicPartition TP3 = new TopicPartition("t11", 1);
 
     private final TopicPartitionComparator comparator = new TopicPartitionComparator();
 
@@ -40,7 +40,7 @@ import static org.mockito.Mockito.verify;
 
 @MockitoSettings(strictness = Strictness.STRICT_STUBS)
 public class WakeupTriggerTest {
-    private final static long DEFAULT_TIMEOUT_MS = 1000;
+    private static final long DEFAULT_TIMEOUT_MS = 1000;
     private WakeupTrigger wakeupTrigger;
 
     @BeforeEach
@@ -30,7 +30,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class RoundRobinPartitionerTest {
-    private final static Node[] NODES = new Node[] {
+    private static final Node[] NODES = new Node[] {
         new Node(0, "localhost", 99),
         new Node(1, "localhost", 100),
         new Node(2, "localhost", 101)
@@ -30,14 +30,14 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class UniformStickyPartitionerTest {
-    private final static Node[] NODES = new Node[] {
+    private static final Node[] NODES = new Node[] {
         new Node(0, "localhost", 99),
         new Node(1, "localhost", 100),
         new Node(2, "localhost", 101)
     };
 
-    private final static String TOPIC_A = "TOPIC_A";
-    private final static String TOPIC_B = "TOPIC_B";
+    private static final String TOPIC_A = "TOPIC_A";
+    private static final String TOPIC_B = "TOPIC_B";
 
     @SuppressWarnings("deprecation")
     @Test
@@ -36,15 +36,15 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class BuiltInPartitionerTest {
-    private final static Node[] NODES = new Node[] {
+    private static final Node[] NODES = new Node[] {
         new Node(0, "localhost", 99),
         new Node(1, "localhost", 100),
         new Node(2, "localhost", 101),
         new Node(11, "localhost", 102)
     };
-    final static String TOPIC_A = "topicA";
-    final static String TOPIC_B = "topicB";
-    final static String TOPIC_C = "topicC";
+    static final String TOPIC_A = "topicA";
+    static final String TOPIC_B = "topicB";
+    static final String TOPIC_C = "topicC";
     final LogContext logContext = new LogContext();
 
     @AfterEach
@@ -28,15 +28,15 @@ import static java.util.Arrays.asList;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class DefaultPartitionerTest {
-    private final static byte[] KEY_BYTES = "key".getBytes();
-    private final static Node[] NODES = new Node[] {
+    private static final byte[] KEY_BYTES = "key".getBytes();
+    private static final Node[] NODES = new Node[] {
         new Node(0, "localhost", 99),
         new Node(1, "localhost", 100),
         new Node(12, "localhost", 101)
     };
-    private final static String TOPIC = "test";
+    private static final String TOPIC = "test";
     // Intentionally make the partition list not in partition order to test the edge cases.
-    private final static List<PartitionInfo> PARTITIONS = asList(new PartitionInfo(TOPIC, 1, null, NODES, NODES),
+    private static final List<PartitionInfo> PARTITIONS = asList(new PartitionInfo(TOPIC, 1, null, NODES, NODES),
         new PartitionInfo(TOPIC, 2, NODES[1], NODES, NODES),
         new PartitionInfo(TOPIC, 0, NODES[0], NODES, NODES));
 
@@ -29,15 +29,15 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
 public class StickyPartitionCacheTest {
-    private final static Node[] NODES = new Node[] {
+    private static final Node[] NODES = new Node[] {
         new Node(0, "localhost", 99),
         new Node(1, "localhost", 100),
         new Node(2, "localhost", 101),
         new Node(11, "localhost", 102)
     };
-    final static String TOPIC_A = "topicA";
-    final static String TOPIC_B = "topicB";
-    final static String TOPIC_C = "topicC";
+    static final String TOPIC_A = "topicA";
+    static final String TOPIC_B = "topicB";
+    static final String TOPIC_C = "topicC";
 
     @Test
     public void testStickyPartitionCache() {
@@ -34,18 +34,18 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 
 public class ClusterTest {
 
-    private final static Node[] NODES = new Node[] {
+    private static final Node[] NODES = new Node[] {
         new Node(0, "localhost", 99),
         new Node(1, "localhost", 100),
         new Node(2, "localhost", 101),
         new Node(11, "localhost", 102)
     };
 
-    private final static String TOPIC_A = "topicA";
-    private final static String TOPIC_B = "topicB";
-    private final static String TOPIC_C = "topicC";
-    private final static String TOPIC_D = "topicD";
-    private final static String TOPIC_E = "topicE";
+    private static final String TOPIC_A = "topicA";
+    private static final String TOPIC_B = "topicB";
+    private static final String TOPIC_C = "topicC";
+    private static final String TOPIC_D = "topicD";
+    private static final String TOPIC_E = "topicE";
 
     @Test
     public void testBootstrap() {
@@ -53,7 +53,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class Lz4CompressionTest {
 
-    private final static Random RANDOM = new Random(0);
+    private static final Random RANDOM = new Random(0);
 
     @Test
     public void testLz4FramingMagicV0() {
@@ -61,7 +61,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
  *
  */
 public class NioEchoServer extends Thread {
-    private final static Logger LOG = LoggerFactory.getLogger(NioEchoServer.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NioEchoServer.class);
 
     public enum MetricType {
         TOTAL, RATE, AVG, MAX;
@@ -27,7 +27,7 @@ import org.junit.jupiter.api.Test;
 
 public class ConfigurationUtilsTest extends OAuthBearerTest {
 
-    private final static String URL_CONFIG_NAME = "url";
+    private static final String URL_CONFIG_NAME = "url";
 
     @Test
     public void testUrl() {
@@ -43,8 +43,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 
 public class SerializationTest {
 
-    final private String topic = "testTopic";
-    final private Map<Class<?>, List<Object>> testData = new HashMap<Class<?>, List<Object>>() {
+    private final String topic = "testTopic";
+    private final Map<Class<?>, List<Object>> testData = new HashMap<Class<?>, List<Object>>() {
         {
             put(String.class, Arrays.asList(null, "my string"));
             put(Short.class, Arrays.asList(null, (short) 32767, (short) -32768));
@@ -44,7 +44,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 @Timeout(120)
 public class ImplicitLinkedHashCollectionTest {
 
-    final static class TestElement implements ImplicitLinkedHashCollection.Element {
+    static final class TestElement implements ImplicitLinkedHashCollection.Element {
         private int prev = ImplicitLinkedHashCollection.INVALID_INDEX;
         private int next = ImplicitLinkedHashCollection.INVALID_INDEX;
         private final int key;
@@ -44,7 +44,7 @@ public class ShellTest {
         assertEquals(length, output.length());
     }
 
-    private final static String NONEXISTENT_PATH = "/dev/a/path/that/does/not/exist/in/the/filesystem";
+    private static final String NONEXISTENT_PATH = "/dev/a/path/that/does/not/exist/in/the/filesystem";
 
     @Test
     public void testAttemptToRunNonExistentProgram() {
@@ -34,7 +34,7 @@ public class StringConverterConfig extends ConverterConfig {
     private static final String ENCODING_DOC = "The name of the Java character set to use for encoding strings as byte arrays.";
     private static final String ENCODING_DISPLAY = "Encoding";
 
-    private final static ConfigDef CONFIG;
+    private static final ConfigDef CONFIG;
 
     static {
         CONFIG = ConverterConfig.newConfigDef();
@@ -51,7 +51,7 @@ public class JsonConverterConfig extends ConverterConfig {
     private static final String REPLACE_NULL_WITH_DEFAULT_DOC = "Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used.";
     private static final String REPLACE_NULL_WITH_DEFAULT_DISPLAY = "Replace null with default";
 
-    private final static ConfigDef CONFIG;
+    private static final ConfigDef CONFIG;
 
     static {
         String group = "Schemas";
@@ -233,7 +233,7 @@ public class MirrorClient implements AutoCloseable {
         return sources;
     }
 
-    static private boolean endOfStream(Consumer<?, ?> consumer, Collection<TopicPartition> assignments) {
+    private static boolean endOfStream(Consumer<?, ?> consumer, Collection<TopicPartition> assignments) {
         Map<TopicPartition, Long> endOffsets = consumer.endOffsets(assignments);
         for (TopicPartition topicPartition : assignments) {
             if (consumer.position(topicPartition) < endOffsets.get(topicPartition)) {
@@ -255,7 +255,7 @@ public class MirrorCheckpointConfig extends MirrorConnectorConfig {
             TOPIC_FILTER_CLASS_DOC);
     }
 
-    protected final static ConfigDef CONNECTOR_CONFIG_DEF = defineCheckpointConfig(new ConfigDef(BASE_CONNECTOR_CONFIG_DEF));
+    protected static final ConfigDef CONNECTOR_CONFIG_DEF = defineCheckpointConfig(new ConfigDef(BASE_CONNECTOR_CONFIG_DEF));
 
     public static void main(String[] args) {
         System.out.println(defineCheckpointConfig(new ConfigDef()).toHtml(4, config -> "mirror_checkpoint_" + config));
@@ -79,7 +79,7 @@ public class MirrorHeartbeatConfig extends MirrorConnectorConfig {
             HEARTBEATS_TOPIC_REPLICATION_FACTOR_DOC);
     }
 
-    protected final static ConfigDef CONNECTOR_CONFIG_DEF = defineHeartbeatConfig(new ConfigDef(BASE_CONNECTOR_CONFIG_DEF));
+    protected static final ConfigDef CONNECTOR_CONFIG_DEF = defineHeartbeatConfig(new ConfigDef(BASE_CONNECTOR_CONFIG_DEF));
 
     public static void main(String[] args) {
         System.out.println(defineHeartbeatConfig(new ConfigDef()).toHtml(4, config -> "mirror_heartbeat_" + config));
@@ -346,7 +346,7 @@ public class MirrorSourceConfig extends MirrorConnectorConfig {
             ADD_SOURCE_ALIAS_TO_METRICS_DOC);
     }
 
-    protected final static ConfigDef CONNECTOR_CONFIG_DEF = defineSourceConfig(new ConfigDef(BASE_CONNECTOR_CONFIG_DEF));
+    protected static final ConfigDef CONNECTOR_CONFIG_DEF = defineSourceConfig(new ConfigDef(BASE_CONNECTOR_CONFIG_DEF));
 
     public static void main(String[] args) {
         System.out.println(defineSourceConfig(new ConfigDef()).toHtml(4, config -> "mirror_source_" + config));
@@ -34,10 +34,10 @@ import static org.junit.jupiter.api.Assertions.assertNull;
 
 public class MirrorSourceMetricsTest {
 
-    private final static String SOURCE = "source";
-    private final static String TARGET = "target";
-    private final static TopicPartition TP = new TopicPartition("topic", 0);
-    private final static TopicPartition SOURCE_TP = new TopicPartition(SOURCE + "." + TP.topic(), TP.partition());
+    private static final String SOURCE = "source";
+    private static final String TARGET = "target";
+    private static final TopicPartition TP = new TopicPartition("topic", 0);
+    private static final TopicPartition SOURCE_TP = new TopicPartition(SOURCE + "." + TP.topic(), TP.partition());
 
     private final Map<String, String> configs = new HashMap<>();
     private TestReporter reporter;
@@ -27,7 +27,7 @@ import java.util.Map;
  */
 public class NumberConverterConfig extends ConverterConfig {
 
-    private final static ConfigDef CONFIG = ConverterConfig.newConfigDef();
+    private static final ConfigDef CONFIG = ConverterConfig.newConfigDef();
 
     public static ConfigDef configDef() {
         return CONFIG;
@@ -419,7 +419,7 @@ public class ConnectorConfig extends AbstractConfig {
      * The abstract method pattern is used to cope with this.
      * @param <T> The type of plugin (either {@code Transformation} or {@code Predicate}).
      */
-    static abstract class EnrichablePlugin<T> {
+    abstract static class EnrichablePlugin<T> {
 
         private final String aliasKind;
         private final String aliasConfig;
@@ -474,7 +474,7 @@ public class ConnectorValidationIntegrationTest {
         );
     }
 
-    public static abstract class TestConverter implements Converter, HeaderConverter {
+    public abstract static class TestConverter implements Converter, HeaderConverter {
 
         // Defined by both Converter and HeaderConverter interfaces
         @Override
@@ -517,7 +517,7 @@ public class ConnectorValidationIntegrationTest {
         }
     }
 
-    public static abstract class AbstractTestConverter extends TestConverter {
+    public abstract static class AbstractTestConverter extends TestConverter {
     }
 
     public static class TestConverterWithPrivateConstructor extends TestConverter {
@@ -52,7 +52,7 @@ public class ConnectorConfigTest<R extends ConnectRecord<R>> {
 
     private static final SinkRecord DUMMY_RECORD = new SinkRecord(null, 0, null, null, null, null, 0L);
 
-    public static abstract class TestConnector extends Connector {
+    public abstract static class TestConnector extends Connector {
     }
 
     public static class SimpleTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
@@ -393,7 +393,7 @@ public class ConnectorConfigTest<R extends ConnectRecord<R>> {
         }
     }
 
-    public static abstract class AbstractTestPredicate<R extends ConnectRecord<R>> implements Predicate<R>, Versioned {
+    public abstract static class AbstractTestPredicate<R extends ConnectRecord<R>> implements Predicate<R>, Versioned {
 
         @Override
         public String version() {
@@ -404,7 +404,7 @@ public class ConnectorConfigTest<R extends ConnectRecord<R>> {
 
     }
 
-    public static abstract class AbstractTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
+    public abstract static class AbstractTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
 
         @Override
         public String version() {
@@ -413,7 +413,7 @@ public class ConnectorConfigTest<R extends ConnectRecord<R>> {
 
     }
 
-    public static abstract class AbstractKeyValueTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
+    public abstract static class AbstractKeyValueTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Versioned {
         @Override
         public R apply(R record) {
             return null;
@@ -642,6 +642,6 @@ public class WorkerConnectorTest {
         }
     }
 
-    private static abstract class TestConnector extends Connector {
+    private abstract static class TestConnector extends Connector {
     }
 }
@@ -697,7 +697,7 @@ public class WorkerSinkTaskThreadedTest {
         return new RecordHeaders();
     }
 
-    private static abstract class TestSinkTask extends SinkTask {
+    private abstract static class TestSinkTask extends SinkTask {
     }
 
     @SuppressWarnings("NewClassNamingConvention")
@@ -283,7 +283,7 @@ public class WorkerTaskTest {
         assertEquals(runningTimeRatio, metrics.currentMetricValueAsDouble(group.metricGroup(), "running-ratio"), 0.000001d);
     }
 
-    private static abstract class TestSinkTask extends SinkTask {
+    private abstract static class TestSinkTask extends SinkTask {
     }
 
     private static class TestWorkerTask extends WorkerTask<Object, SourceRecord> {
@@ -4275,10 +4275,10 @@ public class DistributedHerderTest {
     }
 
     // We need to use a real class here due to some issue with mocking java.lang.Class
-    private static abstract class BogusSourceConnector extends SourceConnector {
+    private abstract static class BogusSourceConnector extends SourceConnector {
     }
 
-    private static abstract class BogusSourceTask extends SourceTask {
+    private abstract static class BogusSourceTask extends SourceTask {
     }
 
     /**
@@ -1205,16 +1205,16 @@ public class StandaloneHerderTest {
     }
 
     // We need to use a real class here due to some issue with mocking java.lang.Class
-    private static abstract class BogusSourceConnector extends SourceConnector {
+    private abstract static class BogusSourceConnector extends SourceConnector {
     }
 
-    private static abstract class BogusSourceTask extends SourceTask {
+    private abstract static class BogusSourceTask extends SourceTask {
     }
 
-    private static abstract class BogusSinkConnector extends SinkConnector {
+    private abstract static class BogusSinkConnector extends SinkConnector {
     }
 
-    private static abstract class BogusSinkTask extends SourceTask {
+    private abstract static class BogusSinkTask extends SourceTask {
     }
 
     private void verifyConnectorStatusRestart() {
@@ -57,7 +57,7 @@ public class SchemaSourceTask extends SourceTask {
     private boolean multipleSchema;
     private int partitionCount;
 
-    private final static Schema VALUE_SCHEMA = SchemaBuilder.struct().version(1).name("record")
+    private static final Schema VALUE_SCHEMA = SchemaBuilder.struct().version(1).name("record")
         .field("boolean", Schema.BOOLEAN_SCHEMA)
         .field("int", Schema.INT32_SCHEMA)
         .field("long", Schema.INT64_SCHEMA)
@@ -68,7 +68,7 @@ public class SchemaSourceTask extends SourceTask {
         .field("seqno", Schema.INT64_SCHEMA)
         .build();
 
-    private final static Schema VALUE_SCHEMA_2 = SchemaBuilder.struct().version(2).name("record")
+    private static final Schema VALUE_SCHEMA_2 = SchemaBuilder.struct().version(2).name("record")
         .field("boolean", Schema.BOOLEAN_SCHEMA)
         .field("int", Schema.INT32_SCHEMA)
         .field("long", Schema.INT64_SCHEMA)
@@ -25,7 +25,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 class FieldPathNotationTest {
-    final static String[] EMPTY_PATH = new String[] {};
+    static final String[] EMPTY_PATH = new String[] {};
 
     @Test
     void shouldBuildV1WithDotsAndBacktickPair() {
@@ -633,7 +633,7 @@ public class RemoteLogManager implements Closeable {
         return Optional.empty();
     }
 
-    private static abstract class CancellableRunnable implements Runnable {
+    private abstract static class CancellableRunnable implements Runnable {
         private volatile boolean cancelled = false;
 
         public void cancel() {
@@ -42,12 +42,12 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
  */
 public class SharePartition {
 
-    private final static Logger log = LoggerFactory.getLogger(SharePartition.class);
+    private static final Logger log = LoggerFactory.getLogger(SharePartition.class);
 
     /**
      * empty member id used to indicate when a record is not acquired by any member.
      */
-    final static String EMPTY_MEMBER_ID = Uuid.ZERO_UUID.toString();
+    static final String EMPTY_MEMBER_ID = Uuid.ZERO_UUID.toString();
 
     /**
      * The RecordState is used to track the state of a record that has been fetched from the leader.
@@ -37,7 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
  */
 public class SharePartitionManager implements AutoCloseable {
 
-    private final static Logger log = LoggerFactory.getLogger(SharePartitionManager.class);
+    private static final Logger log = LoggerFactory.getLogger(SharePartitionManager.class);
 
     /**
      * The partition cache map is used to store the SharePartition objects for each share group topic-partition.
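The hunks above reduce to three recurring fixes: constants (`final static` to `static final`), methods (`synchronized public` to `public synchronized`), and nested classes (`static abstract` to `abstract static`). A hypothetical one-class summary of all three patterns, for illustration only (names are not from the patch):

public class ModifierOrderPatterns {
    private static final String CONSTANT = "x";   // was: private final static

    public synchronized void doWork() {           // was: synchronized public
    }

    public abstract static class Base {           // was: public static abstract
    }
}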